diff --git a/MANUAL.html b/MANUAL.html
index 39b3e2d33..7d68f62c5 100644
--- a/MANUAL.html
+++ b/MANUAL.html
@@ -13,13 +13,75 @@
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
+ pre > code.sourceCode { white-space: pre; position: relative; }
+ pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
+ pre > code.sourceCode > span:empty { height: 1.2em; }
+ code.sourceCode > span { color: inherit; text-decoration: inherit; }
+ div.sourceCode { margin: 1em 0; }
+ pre.sourceCode { margin: 0; }
+ @media screen {
+ div.sourceCode { overflow: auto; }
+ }
+ @media print {
+ pre > code.sourceCode { white-space: pre-wrap; }
+ pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
+ }
+ pre.numberSource code
+ { counter-reset: source-line 0; }
+ pre.numberSource code > span
+ { position: relative; left: -4em; counter-increment: source-line; }
+ pre.numberSource code > span > a:first-child::before
+ { content: counter(source-line);
+ position: relative; left: -1em; text-align: right; vertical-align: baseline;
+ border: none; display: inline-block;
+ -webkit-touch-callout: none; -webkit-user-select: none;
+ -khtml-user-select: none; -moz-user-select: none;
+ -ms-user-select: none; user-select: none;
+ padding: 0 4px; width: 4em;
+ color: #aaaaaa;
+ }
+ pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; }
+ div.sourceCode
+ { }
+ @media screen {
+ pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
+ }
+ code span.al { color: #ff0000; font-weight: bold; } /* Alert */
+ code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
+ code span.at { color: #7d9029; } /* Attribute */
+ code span.bn { color: #40a070; } /* BaseN */
+ code span.bu { } /* BuiltIn */
+ code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
+ code span.ch { color: #4070a0; } /* Char */
+ code span.cn { color: #880000; } /* Constant */
+ code span.co { color: #60a0b0; font-style: italic; } /* Comment */
+ code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
+ code span.do { color: #ba2121; font-style: italic; } /* Documentation */
+ code span.dt { color: #902000; } /* DataType */
+ code span.dv { color: #40a070; } /* DecVal */
+ code span.er { color: #ff0000; font-weight: bold; } /* Error */
+ code span.ex { } /* Extension */
+ code span.fl { color: #40a070; } /* Float */
+ code span.fu { color: #06287e; } /* Function */
+ code span.im { } /* Import */
+ code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
+ code span.kw { color: #007020; font-weight: bold; } /* Keyword */
+ code span.op { color: #666666; } /* Operator */
+ code span.ot { color: #007020; } /* Other */
+ code span.pp { color: #bc7a00; } /* Preprocessor */
+ code span.sc { color: #4070a0; } /* SpecialChar */
+ code span.ss { color: #bb6688; } /* SpecialString */
+ code span.st { color: #4070a0; } /* String */
+ code span.va { color: #19177c; } /* Variable */
+ code span.vs { color: #4070a0; } /* VerbatimString */
+ code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
Rclone syncs your files to cloud storage

@@ -91,6 +153,7 @@
Dreamhost
Dropbox
Enterprise File Fabric
+Fastmail Files
FTP
Google Cloud Storage
Google Drive
@@ -115,12 +178,15 @@
Minio
Nextcloud
OVH
+Blomp Cloud Storage
OpenDrive
OpenStack Swift
Oracle Cloud Storage Swift
Oracle Object Storage
ownCloud
pCloud
+Petabox
+PikPak
premiumize.me
put.io
QingStor
@@ -241,7 +307,10 @@ macOS cannot verify that this app is free from malware.
If you are planning to use the rclone mount feature then you will need to install the third party utility WinFsp also.
Windows package manager (Winget)
Winget comes pre-installed with the latest versions of Windows. If not, update the App Installer package from the Microsoft store.
+To install rclone
winget install Rclone.Rclone
+To uninstall rclone
+winget uninstall Rclone.Rclone --force
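+Winget's standard upgrade verb should also work for updating an existing installation, for example
+winget upgrade Rclone.Rclone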
Chocolatey package manager
Make sure you have Choco installed
choco search rclone
@@ -289,10 +358,16 @@ rclone v1.49.1
# config on host at ~/.config/rclone/rclone.conf
# data on host at ~/data
+# add a remote interactively
+docker run --rm -it \
+ --volume ~/.config/rclone:/config/rclone \
+ --user $(id -u):$(id -g) \
+ rclone/rclone \
+ config
+
# make sure the config is ok by listing the remotes
docker run --rm \
--volume ~/.config/rclone:/config/rclone \
- --volume ~/data:/data:shared \
--user $(id -u):$(id -g) \
rclone/rclone \
listremotes
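# commands that read or write host files also need the data volume,
# for example an illustrative copy from the remote into ~/data
docker run --rm \
    --volume ~/.config/rclone:/config/rclone \
    --volume ~/data:/data:shared \
    --user $(id -u):$(id -g) \
    rclone/rclone \
    copy remote:path /data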
@@ -417,10 +492,11 @@ go build
Memory
Microsoft Azure Blob Storage
Microsoft OneDrive
-OpenStack Swift / Rackspace Cloudfiles / Memset Memstore
+OpenStack Swift / Rackspace Cloudfiles / Blomp Cloud Storage / Memset Memstore
OpenDrive
Oracle Object Storage
Pcloud
+PikPak
premiumize.me
put.io
QingStor
@@ -611,7 +687,7 @@ rclone --dry-run --min-size 100M delete remote:path
Checks the files in the source and destination match.
Synopsis
Checks the files in the source and destination match. It compares sizes and hashes (MD5 or SHA1) and logs a report of files that don't match. It doesn't alter the source or destination.
-For the crypt remote there is a dedicated command, cryptcheck, that are able to check the checksums of the crypted files.
+For the crypt remote there is a dedicated command, cryptcheck, that is able to check the checksums of the encrypted files.
If you supply the --size-only flag, it will only compare the sizes not the hashes as well. Use this for a quick check.
If you supply the --download flag, it will download the data from both remotes and check them against each other on the fly. This can be useful for remotes that don't support hashes or if you really want to check all the data.
If you supply the --checkfile HASH flag with a valid hash name, the source:path must point to a text file in the SUM format.
@@ -625,6 +701,7 @@ rclone --dry-run --min-size 100M delete remote:path
`* path` means path was present in source and destination but different.
! path means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the --checkers=N option for more information.
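+For example, to run more checks in parallel (an illustrative value):
+rclone check source:path dest:path --checkers 16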
rclone check source:path dest:path [flags]
Options
-C, --checkfile string Treat source:path as a SUM file with hashes of given type
@@ -785,7 +862,7 @@ rclone --dry-run --min-size 100M delete remote:path
Counts objects in the path and calculates the total size. Prints the result to standard output.
By default the output is in human-readable format, but shows values in both human-readable format as well as the raw numbers (global option --human-readable is not considered). Use option --json to format output as JSON instead.
Recurses by default, use --max-depth 1 to stop the recursion.
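For example, to report just the top level as JSON (an illustrative invocation):
rclone size remote:path --max-depth 1 --json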
-Some backends do not always provide file sizes, see for example Google Photos and Google Drive. Rclone will then show a notice in the log indicating how many such files were encountered, and count them in as empty files in the output of the size command.
+Some backends do not always provide file sizes, see for example Google Photos and Google Docs. Rclone will then show a notice in the log indicating how many such files were encountered, and count them in as empty files in the output of the size command.
rclone size remote:path [flags]
Options
-h, --help help for size
@@ -1043,14 +1120,22 @@ rclone backend help <backendname>
Or like this to output any .txt files in dir or its subdirectories.
rclone --include "*.txt" cat remote:path/to/dir
Use the --head flag to print characters only at the start, --tail for the end and --offset and --count to print a section in the middle. Note that if offset is negative it will count from the end, so --offset -1 --count 1 is equivalent to --tail 1.
+Use the --separator flag to print a separator value between files. Be sure to shell-escape special characters. For example, to print a newline between files, use:
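+rclone --include "*.txt" --separator "\n" cat remote:path/to/dir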
rclone cat remote:path [flags]
Options
- --count int Only print N characters (default -1)
- --discard Discard the output instead of printing
- --head int Only print the first N characters
- -h, --help help for cat
- --offset int Start printing at offset N (or from end if -ve)
- --tail int Only print the last N characters
+ --count int Only print N characters (default -1)
+ --discard Discard the output instead of printing
+ --head int Only print the first N characters
+ -h, --help help for cat
+ --offset int Start printing at offset N (or from end if -ve)
+ --separator string Separator to use between objects when printing multiple files
+ --tail int Only print the last N characters
See the global flags page for global options not listed here.
SEE ALSO
@@ -1072,6 +1157,7 @@ rclone backend help <backendname>
- `* path` means path was present in source and destination but different.
! path means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the --checkers=N option for more information.
rclone checksum <hash> sumfile src:path [flags]
Options
--combined string Make a combined report of changes to this file
@@ -1089,98 +1175,88 @@ rclone backend help <backendname>
rclone - Show help for rclone commands, flags and backends.
rclone completion
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
Synopsis
-Generate the autocompletion script for rclone for the specified shell. See each sub-command's help for details on how to use the generated script.
+Generates a shell completion script for rclone. Run with --help to list the supported shells.
Options
-h, --help help for completion
See the global flags page for global options not listed here.
SEE ALSO
rclone completion bash
-Generate the autocompletion script for bash
+Output bash completion script for rclone.
Synopsis
-Generate the autocompletion script for the bash shell.
-This script depends on the 'bash-completion' package. If it is not installed already, you can install it via your OS's package manager.
-To load completions in your current shell session:
-source <(rclone completion bash)
-To load completions for every new session, execute once:
-Linux:
-rclone completion bash > /etc/bash_completion.d/rclone
-macOS:
-rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
-You will need to start a new shell for this setup to take effect.
-rclone completion bash
+Generates a bash shell autocompletion script for rclone.
+This writes to /etc/bash_completion.d/rclone by default so will probably need to be run with sudo or as root, e.g.
+sudo rclone completion bash
+Logout and login again to use the autocompletion scripts, or source them directly
+. /etc/bash_completion
+If you supply a command line argument the script will be written there.
+If output_file is "-", then the output will be written to stdout.
+rclone completion bash [output_file] [flags]
Options
- -h, --help help for bash
- --no-descriptions disable completion descriptions
+ -h, --help help for bash
See the global flags page for global options not listed here.
SEE ALSO
rclone completion fish
-Generate the autocompletion script for fish
+Output fish completion script for rclone.
Synopsis
-Generate the autocompletion script for the fish shell.
-To load completions in your current shell session:
-rclone completion fish | source
-To load completions for every new session, execute once:
-rclone completion fish > ~/.config/fish/completions/rclone.fish
-You will need to start a new shell for this setup to take effect.
-rclone completion fish [flags]
+Generates a fish autocompletion script for rclone.
+This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g.
+sudo rclone completion fish
+Logout and login again to use the autocompletion scripts, or source them directly
+. /etc/fish/completions/rclone.fish
+If you supply a command line argument the script will be written there.
+If output_file is "-", then the output will be written to stdout.
+rclone completion fish [output_file] [flags]
Options
- -h, --help help for fish
- --no-descriptions disable completion descriptions
+ -h, --help help for fish
See the global flags page for global options not listed here.
SEE ALSO
rclone completion powershell
Generate the autocompletion script for powershell
-Synopsis
+Synopsis
Generate the autocompletion script for powershell.
To load completions in your current shell session:
rclone completion powershell | Out-String | Invoke-Expression
To load completions for every new session, add the output of the above command to your powershell profile.
rclone completion powershell [flags]
-Options
+Options
-h, --help help for powershell
--no-descriptions disable completion descriptions
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone completion zsh
-Generate the autocompletion script for zsh
+Output zsh completion script for rclone.
Synopsis
-Generate the autocompletion script for the zsh shell.
-If shell completion is not already enabled in your environment you will need to enable it. You can execute the following once:
-echo "autoload -U compinit; compinit" >> ~/.zshrc
-To load completions in your current shell session:
-source <(rclone completion zsh); compdef _rclone rclone
-To load completions for every new session, execute once:
-Linux:
-rclone completion zsh > "${fpath[1]}/_rclone"
-macOS:
-rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
-You will need to start a new shell for this setup to take effect.
-rclone completion zsh [flags]
+Generates a zsh autocompletion script for rclone.
+This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g.
+sudo rclone completion zsh
+Logout and login again to use the autocompletion scripts, or source them directly
+autoload -U compinit && compinit
+If you supply a command line argument the script will be written there.
+If output_file is "-", then the output will be written to stdout.
+rclone completion zsh [output_file] [flags]
Options
- -h, --help help for zsh
- --no-descriptions disable completion descriptions
+ -h, --help help for zsh
See the global flags page for global options not listed here.
SEE ALSO
rclone config create
Create a new remote with name, type and options.
@@ -1508,9 +1584,9 @@ if src is directory
rclone - Show help for rclone commands, flags and backends.
rclone cryptcheck
-Cryptcheck checks the integrity of a crypted remote.
+Cryptcheck checks the integrity of an encrypted remote.
Synopsis
-rclone cryptcheck checks a remote against a crypted remote. This is the equivalent of running rclone check, but able to check the checksums of the crypted remote.
+rclone cryptcheck checks a remote against an encrypted remote. This is the equivalent of running rclone check, but able to check the checksums of the encrypted remote.
For it to work the underlying remote of the cryptedremote must support some kind of checksum.
It works by reading the nonce from each file on the cryptedremote: and using that to encrypt each file on the remote:. It then checks the checksum of the underlying file on the cryptedremote: against the checksum of the file it has just encrypted.
Use it like this
@@ -1528,6 +1604,7 @@ if src is directory
`* path` means path was present in source and destination but different.
! path means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the --checkers=N option for more information.
rclone cryptcheck remote:path cryptedremote:path [flags]
Options
--combined string Make a combined report of changes to this file
@@ -1576,12 +1653,12 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2
rclone genautocomplete
Output completion script for a given shell.
-Synopsis
+Synopsis
Generates a shell completion script for rclone. Run with --help to list the supported shells.
-Options
+Options
-h, --help help for genautocomplete
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
- rclone genautocomplete bash - Output bash completion script for rclone.
@@ -1590,7 +1667,7 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2
rclone genautocomplete bash
Output bash completion script for rclone.
-Synopsis
+Synopsis
Generates a bash shell autocompletion script for rclone.
This writes to /etc/bash_completion.d/rclone by default so will probably need to be run with sudo or as root, e.g.
sudo rclone genautocomplete bash
@@ -1599,16 +1676,16 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2
If you supply a command line argument the script will be written there.
If output_file is "-", then the output will be written to stdout.
rclone genautocomplete bash [output_file] [flags]
-Options
+Options
-h, --help help for bash
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone genautocomplete fish
Output fish completion script for rclone.
-Synopsis
+Synopsis
Generates a fish autocompletion script for rclone.
This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g.
sudo rclone genautocomplete fish
@@ -1617,16 +1694,16 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2
If you supply a command line argument the script will be written there.
If output_file is "-", then the output will be written to stdout.
rclone genautocomplete fish [output_file] [flags]
-Options
+Options
-h, --help help for fish
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone genautocomplete zsh
Output zsh completion script for rclone.
-Synopsis
+Synopsis
Generates a zsh autocompletion script for rclone.
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g.
sudo rclone genautocomplete zsh
@@ -1635,10 +1712,10 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2
If you supply a command line argument the script will be written there.
If output_file is "-", then the output will be written to stdout.
rclone genautocomplete zsh [output_file] [flags]
-Options
+Options
-h, --help help for zsh
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
@@ -1710,7 +1787,7 @@ rclone link --expire 1d remote:path/to/file
rclone - Show help for rclone commands, flags and backends.
rclone listremotes
-List all the remotes in the config file.
+List all the remotes in the config file and defined in environment variables.
Synopsis
rclone listremotes lists all the available remotes from the config file.
When used with the --long flag it lists the types too.
@@ -1944,6 +2021,11 @@ rclone mount remote:path/to/files * --volname \\cloud\remote
Note that mapping to a directory path, instead of a drive letter, does not suffer from the same limitations.
Mounting on macOS
Mounting on macOS can be done either via macFUSE (also known as osxfuse) or FUSE-T. macFUSE is a traditional FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which "mounts" via an NFSv4 local server.
+macFUSE Notes
+If installing macFUSE using dmg packages from the website, rclone will locate the macFUSE libraries without any further intervention. If, however, macFUSE is installed using the macports package manager, the following additional steps are required.
+sudo mkdir /usr/local/lib
+cd /usr/local/lib
+sudo ln -s /opt/local/lib/libfuse.2.dylib
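+You can then verify that the symlink is in place:
+ls -l /usr/local/lib/libfuse.2.dylib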
FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are current as of FUSE-T version 1.0.14.
ModTime update on read
@@ -1985,17 +2067,16 @@ rclone mount remote:path/to/files * --volname \\cloud\remote
or create systemd mount units:
# /etc/systemd/system/mnt-data.mount
[Unit]
-After=network-online.target
+Description=Mount for /mnt/data
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
-Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
optionally accompanied by systemd automount unit
# /etc/systemd/system/mnt-data.automount
[Unit]
-After=network-online.target
-Before=remote-fs.target
+Description=AutoMount for /mnt/data
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
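The units can then be activated in the usual systemd way, for example:
sudo systemctl daemon-reload
sudo systemctl enable --now mnt-data.automount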
@@ -2039,14 +2120,15 @@ WantedBy=multi-user.target
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
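As an illustrative combination of these options, a cached mount might be run as:
rclone mount remote:path /mnt/data --vfs-cache-mode full --vfs-cache-max-age 2h --vfs-cache-max-size 10G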
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -2148,6 +2230,7 @@ WantedBy=multi-user.target
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -2159,7 +2242,7 @@ WantedBy=multi-user.target
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -2314,7 +2397,7 @@ ffmpeg - | rclone rcat remote:path/to/file
rcat will try to upload small files in a single request, which is usually more efficient than the streaming/chunked upload endpoints, which use multiple requests. Exact behaviour depends on the remote. What is considered a small file may be set through --streaming-upload-cutoff. Uploading only starts after the cutoff is reached or if the file ends before that. The data must fit into RAM. The cutoff needs to be small enough to adhere to the limits of your remote, please see there. Generally speaking, setting this cutoff too high will decrease your performance.
Use the --size flag to preallocate the file in advance at the remote end and actually stream it, even if the remote backend doesn't support streaming.
--size should be the exact size of the input stream in bytes. If the size of the stream is different in length to the --size passed in then the transfer will likely fail.
-Note that the upload can also not be retried because the data is not kept around until the upload succeeds. If you need to transfer a lot of data, you're better off caching locally and then rclone move it to the destination.
+Note that the upload cannot be retried because the data is not stored. If the backend supports multipart uploading then individual chunks can be retried. If you need to transfer a lot of data, you may be better off caching it locally and then rclone move it to the destination, which can use retries.
rclone rcat remote:path [flags]
Options
-h, --help help for rcat
@@ -2332,19 +2415,19 @@ ffmpeg - | rclone rcat remote:path/to/file
If you pass in a path to a directory, rclone will serve that directory for GET requests on the URL passed in. It will also open the URL in the browser when rclone is run.
See the rc documentation for more info on the rc flags.
Server options
-Use --addr to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
-If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
+Use --rc-addr to specify which IP address and port the server should listen on, eg --rc-addr 1.2.3.4:8000 or --rc-addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
+If you set --rc-addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
You can use a unix socket by setting the url to unix:///path/to/socket or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
---addr may be repeated to listen on multiple IPs/ports/sockets.
---server-read-timeout and --server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer.
---max-header-bytes controls the maximum number of bytes the server will accept in the HTTP header.
---baseurl controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone" then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl, so --baseurl "rclone", --baseurl "/rclone" and --baseurl "/rclone/" are all treated identically.
+--rc-addr may be repeated to listen on multiple IPs/ports/sockets.
+--rc-server-read-timeout and --rc-server-write-timeout can be used to control the timeouts on the server. Note that this is the total time for a transfer.
+--rc-max-header-bytes controls the maximum number of bytes the server will accept in the HTTP header.
+--rc-baseurl controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --rc-baseurl "/rclone" then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --rc-baseurl, so --rc-baseurl "rclone", --rc-baseurl "/rclone" and --rc-baseurl "/rclone/" are all treated identically.
TLS (SSL)
-By default this will serve over http. If you want you can serve over https. You will need to supply the --cert and --key flags. If you wish to do client side certificate validation then you will need to supply --client-ca also.
---cert should be a either a PEM encoded certificate or a concatenation of that with the CA certificate. --key should be the PEM encoded private key and --client-ca should be the PEM encoded client certificate authority certificate.
---min-tls-version is minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
+By default this will serve over http. If you want you can serve over https. You will need to supply the --rc-cert and --rc-key flags. If you wish to do client side certificate validation then you will need to supply --rc-client-ca also.
+--rc-cert should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --rc-key should be the PEM encoded private key and --rc-client-ca should be the PEM encoded client certificate authority certificate.
+--rc-min-tls-version is minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
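+For example, to require TLS 1.2 or later on the remote control server (the certificate paths are illustrative):
+rclone rcd --rc-addr :5572 --rc-cert server.crt --rc-key server.key --rc-min-tls-version tls1.2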
Template
---template allows a user to specify a custom markup template for HTTP and WebDAV serve functions. The server exports the following markup to be used within the template to server pages:
+--rc-template allows a user to specify a custom markup template for HTTP and WebDAV serve functions. The server exports the following markup to be used within the template to serve pages:
@@ -2425,15 +2508,16 @@ ffmpeg - | rclone rcat remote:path/to/file
Authentication
By default this will serve files without needing a login.
-You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user and --pass flags.
-Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
+You can either use an htpasswd file which can take lots of users, or set a single username and password with the --rc-user and --rc-pass flags.
+If no static users are configured by either of the above methods, and client certificates are required by the --rc-client-ca flag passed to the server, the client certificate common name will be considered as the username.
+Use --rc-htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
To create an htpasswd file:
touch htpasswd
htpasswd -B htpasswd user
htpasswd -B htpasswd anotherUser
The password file can be updated while rclone is running.
-Use --realm to set the authentication realm.
-Use --salt to change the password hashing salt from the default.
+Use --rc-realm to set the authentication realm.
+Use --rc-salt to change the password hashing salt from the default.
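+Putting these together, an authenticated remote control server might be started with (illustrative credentials):
+rclone rcd --rc-addr :5572 --rc-user sally --rc-pass secret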
rclone rcd <path to files to serve>* [flags]
Options
-h, --help help for rcd
@@ -2539,14 +2623,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -2647,7 +2732,7 @@ htpasswd -B htpasswd anotherUser
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -2702,14 +2787,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -2813,6 +2899,7 @@ htpasswd -B htpasswd anotherUser
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -2827,7 +2914,7 @@ htpasswd -B htpasswd anotherUser
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -2884,14 +2971,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -3025,7 +3113,7 @@ htpasswd -B htpasswd anotherUser
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -3146,6 +3234,7 @@ htpasswd -B htpasswd anotherUser
Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and client certificates are required by the --client-ca flag passed to the server, the client certificate common name will be considered as the username.
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
To create an htpasswd file:
touch htpasswd
@@ -3180,14 +3269,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -3329,7 +3419,7 @@ htpasswd -B htpasswd anotherUser
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -3406,6 +3496,7 @@ $ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and client certificates are required by the --client-ca flag passed to the server, the client certificate common name will be considered as the username.
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
To create an htpasswd file:
touch htpasswd
@@ -3456,7 +3547,7 @@ htpasswd -B htpasswd anotherUser
If --stdio is specified, rclone will serve SFTP over stdio, which can be used with sshd via ~/.ssh/authorized_keys, for example:
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
On the client you need to set --transfers 1 when using --stdio. Otherwise multiple instances of the rclone server are started by OpenSSH which can lead to "corrupted on transfer" errors. This is the case because the client chooses indiscriminately which server to send commands to while the servers all have different views of the state of the filing system.
-The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from beeing used. Omitting "restrict" and using --sftp-path-override to enable checksumming is possible but less secure and you could use the SFTP server provided by OpenSSH in this case.
+The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being used. Omitting "restrict" and using --sftp-path-override to enable checksumming is possible but less secure and you could use the SFTP server provided by OpenSSH in this case.
VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
@@ -3483,14 +3574,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -3624,7 +3716,7 @@ htpasswd -B htpasswd anotherUser
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -3751,6 +3843,7 @@ htpasswd -B htpasswd anotherUser
Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and client certificates are required by the --client-ca flag passed to the server, the client certificate common name will be considered as the username.
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
To create an htpasswd file:
touch htpasswd
@@ -3785,14 +3878,15 @@ htpasswd -B htpasswd anotherUser
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
If run with -vv
rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir
or setting the appropriate environment variable.
The cache has 4 different modes selected by --vfs-cache-mode
. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size
note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache.
+If using --vfs-cache-max-size
note that the cache may exceed this size for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
is exceeded, rclone will attempt to evict the least accessed files from the cache first, starting with the files that haven't been accessed for the longest time. This eviction strategy is efficient, so the most relevant files are likely to remain cached.
+The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. With the default value of 1 hour, rclone will start evicting files that haven't been accessed for 1 hour. When a cached file is accessed, the 1 hour timer is reset to 0 and the file is kept for another hour. Specify the time with standard notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
@@ -3936,7 +4030,7 @@ htpasswd -B htpasswd anotherUser
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -3978,7 +4072,7 @@ htpasswd -B htpasswd anotherUser
Run a test command
Synopsis
Rclone test is used to run test commands.
-Select which test comand you want with the subcommand, eg
+Select which test command you want with the subcommand, eg
rclone test memory remote:
Each subcommand has its own options which you can see in their help.
NB Be careful running these commands: they may do strange things, so reading their documentation first is recommended.
@@ -4479,7 +4573,7 @@ rclone sync --interactive /path/to/files remote:current-backup
Eg rclone --checksum sync s3:/bucket swift:/bucket
would run much quicker than without the --checksum
flag.
When using this flag, rclone won't update mtimes of remote files if they are incorrect, as it normally would.
--color WHEN
-Specifiy when colors (and other ANSI codes) should be added to the output.
+Specify when colors (and other ANSI codes) should be added to the output.
AUTO
(default) only allows ANSI codes when the output is a terminal
NEVER
never allow ANSI codes
ALWAYS
always add ANSI codes, regardless of the output format (terminal or file)
@@ -4525,6 +4619,8 @@ pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH
Note that passwords are in obscured form. Also, many storage systems use token-based authentication instead of passwords, and this requires additional steps. It is easier, and safer, to use the interactive command rclone config
instead of manually editing the configuration file.
The configuration file will typically contain login information, and should therefore have restricted permissions so that only the current user can read it. Rclone tries to ensure this when it writes the file. You may also choose to encrypt the file.
When token-based authentication is used, the configuration file must be writable, because rclone needs to update the tokens inside it.
+To reduce the risk of corrupting an existing configuration file, rclone will not write directly to it when saving changes. Instead it will first write to a new, temporary, file. If a configuration file already exists, rclone will (on Unix systems) try to mirror its permissions to the new file. Then it will rename the existing file to a temporary name as backup. Next, rclone will rename the new file to the correct name, before finally cleaning up by deleting the backup file.
+If the configuration file path used by rclone is a symbolic link, then this will be evaluated and rclone will write to the resolved path, instead of overwriting the symbolic link. Temporary files used in the process (described above) will be written to the same parent directory as that of the resolved configuration file, but if this directory is also a symbolic link it will not be resolved and the temporary files will be written to the location of the directory symbolic link.
--contimeout=TIME
Set the connection timeout. This should be in go time format which looks like 5s
for 5 seconds, 10m
for 10 minutes, or 3h30m
.
The connection timeout is the amount of time rclone will wait for a connection to go through to a remote object storage system. It is 1m
by default.
@@ -4535,13 +4631,21 @@ pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH
--dedupe-mode MODE
Mode to run dedupe command in. One of interactive
, skip
, first
, newest
, oldest
, rename
. The default is interactive
.
See the dedupe command for more information as to what these options mean.
+--default-time TIME
+If a file or directory does not have a modification time rclone can read then rclone will display this fixed time instead.
+The default is 2000-01-01 00:00:00 UTC
. This can be configured in any of the ways shown in the time or duration options.
+For example --default-time 2020-06-01
to set the default time to the 1st of June 2020 or --default-time 0s
to set the default time to the time rclone started up.
--disable FEATURE,FEATURE,...
This disables a comma separated list of optional features. For example to disable server-side move and server-side copy use:
--disable move,copy
The features can be put in any case.
To see a list of which features can be disabled use:
--disable help
+The features a remote has can be seen in JSON format with:
+rclone backend features remote:
See the overview features and optional features to get an idea of which feature does what.
+Note that some features can be set to true
if they are true
/false
feature flags, by prefixing them with !
. For example the CaseInsensitive
feature can be forced to false
with --disable CaseInsensitive
and forced to true
with --disable '!CaseInsensitive'
. In general it isn't a good idea to do this, but it may be useful in extremis.
+(Note that !
is a special character in the shell, which you will need to escape with single quotes or a backslash on unix-like platforms.)
This flag can be useful for debugging and in exceptional circumstances (e.g. Google Drive limiting the total volume of Server Side Copies to 100 GiB/day).
--disable-http2
This stops rclone from trying to use HTTP/2 if available. This can sometimes speed up transfers due to a problem in the Go standard library.
@@ -4610,6 +4714,22 @@ See the dedupe command for more information as to what these options mean.
With this option set, files will be created and deleted as requested, but existing files will never be updated. If an existing file does not match between the source and destination, rclone will give the error Source and destination exist but do not match: immutable file modified
.
Note that only commands which transfer files (e.g. sync
, copy
, move
) are affected by this behavior, and only modification is disallowed. Files may still be deleted explicitly (e.g. delete
, purge
) or implicitly (e.g. sync
, move
). Use copy --immutable
if it is desired to avoid deletion as well as modification.
This can be useful as an additional layer of protection for immutable or append-only data sets (notably backup archives), where modification implies corruption and should not be propagated.
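For example, a backup archive could be grown without ever touching existing files (illustrative paths):
rclone copy --immutable /path/to/backups remote:archive
New files are uploaded as usual, modification of existing files fails with the error above, and because copy never deletes, existing files are not removed either.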
+--inplace
+The --inplace
flag changes the behaviour of rclone when uploading files to some backends (backends with the PartialUploads
feature flag set) such as:
+
+Without --inplace
(the default) rclone will first upload to a temporary file with an extension like this where XXXXXX
represents a random string.
+original-file-name.XXXXXX.partial
+(rclone will make sure the final name is no longer than 100 characters by truncating the original-file-name
part if necessary).
+When the upload is complete, rclone will rename the .partial
file to the correct name, overwriting any existing file at that point. If the upload fails then the .partial
file will be deleted.
+This prevents other users of the backend from seeing partially uploaded files in their new names and prevents overwriting the old file until the new one is completely uploaded.
+If the --inplace
flag is supplied, rclone will upload directly to the final name without creating a .partial
file.
+This means that an incomplete file will be visible in the directory listings while the upload is in progress and any existing files will be overwritten as soon as the upload starts. If the transfer fails then the partially written file will be deleted, which can cause data loss of the existing file.
+Note that on the local file system, if you don't use --inplace
, hard links (Unix only) will be broken, and if you do use --inplace
you won't be able to update in-use executables.
+Note also that versions of rclone prior to v1.63.0 behave as if the --inplace
flag is always supplied.
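For example (illustrative paths):
rclone copy --inplace /home/user/big.iso remote:backup
The upload goes straight to remote:backup/big.iso, which is visible, but incomplete, until the transfer finishes.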
-i, --interactive
This flag can be used to tell rclone that you wish a manual confirmation before destructive operations.
It is recommended that you use this flag while learning rclone especially with rclone sync
.
@@ -4690,6 +4810,11 @@ y/n/s/!/q> n
When checking whether a file has been modified, this is the maximum allowed time difference that a file can have and still be considered equivalent.
The default is 1ns
unless this is overridden by a remote. For example OS X only stores modification times to the nearest second so if you are reading and writing to an OS X filing system this will be 1s
by default.
This command line flag allows you to override that computed default.
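For example, when one side only stores times to the nearest second, you could relax the comparison accordingly (an illustrative command):
rclone sync /path/to/files remote:backup --modify-window 1s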
+--multi-thread-write-buffer-size=SIZE
+When downloading with multiple threads, rclone will buffer SIZE bytes in memory before writing to disk for each thread.
+This can improve performance if the underlying filesystem does not deal well with a lot of small writes in different positions of the file, so if you see downloads being limited by disk write speed, you might want to experiment with different values. Especially for magnetic drives and remote file systems, a higher value can be useful.
+Nevertheless, the default of 128k
should be fine for almost all use cases, so before changing it ensure that the network is not really your bottleneck.
+As a final hint, size is not the only factor: block size (or similar concept) can have an impact. In one case, we observed that exact multiples of 16k performed much better than other values.
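For example, to try a larger buffer when a magnetic drive is the bottleneck (an illustrative command; 512k is an exact multiple of 16k, per the hint above):
rclone copy remote:big-file /mnt/slow-disk --multi-thread-write-buffer-size 512k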
--multi-thread-cutoff=SIZE
When downloading files to the local backend above this size, rclone will use multiple threads to download the file (default 250Mi).
Rclone preallocates the file (using fallocate(FALLOC_FL_KEEP_SIZE)
on unix or NTSetInformationFile
on Windows both of which take no time) then each thread writes directly into the file at the correct place. This means that rclone won't create fragmented or sparse files and there won't be any assembly time at the end of the transfer.
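For example, to start multi-thread downloads at 1 GiB and use 8 streams (an illustrative command):
rclone copy remote:videos /mnt/local --multi-thread-cutoff 1G --multi-thread-streams 8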
@@ -4843,6 +4968,7 @@ y/n/s/!/q> n
--suffix-keep-extension
When using --suffix
, setting this causes rclone to put the SUFFIX before the extension of the files that it backs up rather than after.
So let's say we had --suffix -2019-01-01
, without the flag file.txt
would be backed up to file.txt-2019-01-01
and with the flag it would be backed up to file-2019-01-01.txt
. This can be helpful to make sure the suffixed files can still be opened.
+If a file has two (or more) extensions and the second (or subsequent) extension is recognised as a valid mime type, then the suffix will go before that extension. So file.tar.gz
would be backed up to file-2019-01-01.tar.gz
whereas file.badextension.gz
would be backed up to file.badextension-2019-01-01.gz
.
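For example, combined with a backup directory (illustrative paths, reusing the suffix from above):
rclone sync /path/to/files remote:current --backup-dir remote:old --suffix=-2019-01-01 --suffix-keep-extension
A changed file.txt then ends up in remote:old as file-2019-01-01.txt, and a changed file.tar.gz as file-2019-01-01.tar.gz.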
--syslog
On capable OSes (not Windows or Plan9) send all log output to syslog.
This can be useful for running rclone in a script or rclone mount
.
@@ -5296,7 +5422,7 @@ ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]
Not
{{start.*end\.jpg}}
Which will match a directory called start
with a file called end.jpg
in it as the .*
will match /
characters.
-Note that you can use -vv --dump filters
to show the filter patterns in regexp format - rclone implements the glob patters by transforming them into regular expressions.
+Note that you can use -vv --dump filters
to show the filter patterns in regexp format - rclone implements the glob patterns by transforming them into regular expressions.
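For example (an illustrative command):
rclone ls remote: --include '*.jpg' -vv --dump filters
This prints the include/exclude rules compiled to regular expressions before the listing runs.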
Filter pattern examples
@@ -6052,7 +6178,7 @@ rclone rc cache/expire remote=/ withData=true
See the config dump command for more information on the above.
Authentication is required for this call.
-config/listremotes: Lists the remotes in the config file.
+config/listremotes: Lists the remotes in the config file and defined in environment variables.
Returns - remotes - array of remote names
See the listremotes command for more information on the above.
Authentication is required for this call.
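For example, with an rclone instance running with --rc, it might return something like this (the remote names are illustrative):
rclone rc config/listremotes
{
	"remotes": ["local", "drive", "s3"]
}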
@@ -6440,9 +6566,9 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
operations/copyfile: Copy a file from source remote to destination remote
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
Authentication is required for this call.
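For example, copying a file from a remote to the local filesystem (illustrative names):
rclone rc operations/copyfile srcFs=drive: srcRemote=file.txt dstFs=/ dstRemote=tmp/file.txt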
@@ -6617,9 +6743,9 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
operations/movefile: Move a file from source remote to destination remote
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
Authentication is required for this call.
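For example, moving a local file up to a remote (illustrative names):
rclone rc operations/movefile srcFs=/ srcRemote=tmp/file.txt dstFs=drive2: dstRemote=file2.txt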
@@ -7292,6 +7418,15 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
+PikPak |
+MD5 |
+R |
+No |
+No |
+R |
+- |
+
+
premiumize.me |
- |
- |
@@ -7300,7 +7435,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
put.io |
CRC-32 |
R/W |
@@ -7309,7 +7444,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
QingStor |
MD5 |
- ⁹ |
@@ -7318,7 +7453,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Seafile |
- |
- |
@@ -7327,7 +7462,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SFTP |
MD5, SHA1 ² |
R/W |
@@ -7336,7 +7471,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Sia |
- |
- |
@@ -7345,7 +7480,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SMB |
- |
- |
@@ -7354,7 +7489,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SugarSync |
- |
- |
@@ -7363,7 +7498,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Storj |
- |
R |
@@ -7372,7 +7507,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Uptobox |
- |
- |
@@ -7381,7 +7516,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
WebDAV |
MD5, SHA1 ³ |
R ⁴ |
@@ -7390,7 +7525,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Yandex Disk |
MD5 |
R/W |
@@ -7399,7 +7534,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
Zoho WorkDrive |
- |
- |
@@ -7408,7 +7543,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
The local filesystem |
All |
R/W |
@@ -7422,8 +7557,8 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Notes
¹ Dropbox supports its own custom hash. This is an SHA256 sum of all the 4 MiB block SHA256s.
² SFTP supports checksums if the same login has shell access and md5sum
or sha1sum
as well as echo
are in the remote's PATH.
-³ WebDAV supports hashes when used with Owncloud and Nextcloud only.
-⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
+³ WebDAV supports hashes when used with Fastmail Files, Owncloud and Nextcloud only.
+⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud only.
⁵ QuickXorHash is Microsoft's own hash.
⁶ Mail.ru uses its own modified SHA1 hash.
⁷ pCloud only supports SHA1 (not MD5) in its EU region
@@ -7988,7 +8123,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
No |
No |
-Yes |
+No |
No |
No |
Yes |
@@ -8267,6 +8402,19 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
+PikPak |
+Yes |
+Yes |
+Yes |
+Yes |
+Yes |
+No |
+No |
+Yes |
+Yes |
+Yes |
+
+
premiumize.me |
Yes |
No |
@@ -8279,7 +8427,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
put.io |
Yes |
No |
@@ -8292,7 +8440,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
QingStor |
No |
Yes |
@@ -8305,7 +8453,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
Seafile |
Yes |
Yes |
@@ -8318,7 +8466,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
SFTP |
No |
No |
@@ -8331,7 +8479,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Sia |
No |
No |
@@ -8344,7 +8492,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
SMB |
No |
No |
@@ -8357,7 +8505,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
SugarSync |
Yes |
Yes |
@@ -8370,7 +8518,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
Storj |
Yes ☨ |
Yes |
@@ -8383,7 +8531,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
Uptobox |
No |
Yes |
@@ -8396,7 +8544,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
WebDAV |
Yes |
Yes |
@@ -8409,7 +8557,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Yandex Disk |
Yes |
Yes |
@@ -8422,7 +8570,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Zoho WorkDrive |
Yes |
Yes |
@@ -8435,7 +8583,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
The local filesystem |
Yes |
No |
@@ -8484,716 +8632,746 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
This describes the global flags available to every rclone command split into two groups, non backend and backend flags.
Non Backend Flags
These flags are available for every command.
- --ask-password Allow prompt for password for encrypted configuration (default true)
- --auto-confirm If enabled, do not request console confirmation
- --backup-dir string Make backups into hierarchy based in DIR
- --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
- --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
- --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --ca-cert stringArray CA certificate used to verify servers
- --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
- --check-first Do all the checks before starting transfers
- --checkers int Number of checkers to run in parallel (default 8)
- -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
- --client-cert string Client SSL certificate (PEM) for mutual TLS auth
- --client-key string Client SSL private key (PEM) for mutual TLS auth
- --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
- --config string Config file (default "$HOME/.config/rclone/rclone.conf")
- --contimeout Duration Connect timeout (default 1m0s)
- --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
- --cpuprofile string Write cpu profile to file
- --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
- --delete-after When synchronizing, delete files on destination after transferring (default)
- --delete-before When synchronizing, delete files on destination before transferring
- --delete-during When synchronizing, delete files during transfer
- --delete-excluded Delete files on dest excluded from sync
- --disable string Disable a comma separated list of features (use --disable help to see a list)
- --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
- --disable-http2 Disable HTTP/2 in the global transport
- -n, --dry-run Do a trial run with no permanent changes
- --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
- --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
- --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
- --dump-headers Dump HTTP headers - may contain sensitive info
- --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
- --fast-list Use recursive list if available; uses more memory but fewer transactions
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
- --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
- --header stringArray Set HTTP header for all transactions
- --header-download stringArray Set HTTP header for download transactions
- --header-upload stringArray Set HTTP header for upload transactions
- --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
- --ignore-case Ignore case in filters (case insensitive)
- --ignore-case-sync Ignore case when synchronizing
- --ignore-checksum Skip post copy check of checksums
- --ignore-errors Delete even if there are I/O errors
- --ignore-existing Skip all files that exist on destination
- --ignore-size Ignore size when skipping use mod-time or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
- --immutable Do not modify files, fail if existing files have been modified
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- -i, --interactive Enable interactive mode
- --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
- --log-file string Log everything to this file
- --log-format string Comma separated list of log format options (default "date,time")
- --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
- --log-systemd Activate systemd integration for the logger
- --low-level-retries int Number of low level retries to do (default 10)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
- --max-delete int When synchronizing, limit the number of deletes (default -1)
- --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
- --max-transfer SizeSuffix Maximum size of data to transfer (default off)
- --memprofile string Write memory profile to file
- -M, --metadata If set, preserve metadata when copying objects
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --metadata-set stringArray Add metadata key=value when uploading
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
- --modify-window Duration Max time diff to be considered the same (default 1ns)
- --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
- --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
- --no-check-certificate Do not verify the server SSL certificate (insecure)
- --no-check-dest Don't check the destination, copy regardless
- --no-console Hide console window (supported on Windows only)
- --no-gzip-encoding Don't set Accept-Encoding: gzip
- --no-traverse Don't traverse destination file system on copy
- --no-unicode-normalization Don't normalize unicode characters in filenames
- --no-update-modtime Don't update destination mod-time if files identical
- --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
- --password-command SpaceSepList Command for supplying password for encrypted configuration
- -P, --progress Show progress during transfer
- --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
- -q, --quiet Print as little stuff as possible
- --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
- --rc-allow-origin string Set the allowed origin for CORS
- --rc-baseurl string Prefix for URLs - leave blank for root
- --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
- --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
- --rc-files string Path to local files to serve on the HTTP server
- --rc-htpasswd string A htpasswd file - if not provided no authentication is done
- --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
- --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
- --rc-key string TLS PEM Private key
- --rc-max-header-bytes int Maximum size of request header (default 4096)
- --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --rc-no-auth Don't require auth for certain methods
- --rc-pass string Password for authentication
- --rc-realm string Realm for authentication
- --rc-salt string Password hashing salt (default "dlPL2MqE")
- --rc-serve Enable the serving of remote objects
- --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --rc-template string User-specified template
- --rc-user string User name for authentication
- --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
- --rc-web-gui Launch WebGUI on localhost
- --rc-web-gui-force-update Force update to latest version of web gui
- --rc-web-gui-no-open-browser Don't open the browser automatically
- --rc-web-gui-update Check and update to latest version of web gui
- --refresh-times Refresh the modtime of remote files
- --retries int Retry operations this many times if they fail (default 3)
- --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
- --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
- --size-only Skip based on size only, not mod-time or checksum
- --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
- --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
- --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
- --stats-one-line Make the stats fit on one line
- --stats-one-line-date Enable --stats-one-line and add current date/time prefix
- --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
- --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
- --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
- --suffix string Suffix to add to changed files
- --suffix-keep-extension Preserve the extension when using --suffix
- --syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
- --temp-dir string Directory rclone will use for temporary files (default "/tmp")
- --timeout Duration IO idle timeout (default 5m0s)
- --tpslimit float Limit HTTP transactions per second to this
- --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
- --track-renames When synchronizing, track file renames and do a server-side move if possible
- --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
- --transfers int Number of file transfers to run in parallel (default 4)
- -u, --update Skip files that are newer on the destination
- --use-cookies Enable session cookiejar
- --use-json-log Use json log format
- --use-mmap Use mmap allocator (see docs)
- --use-server-modtime Use server modified time instead of object metadata
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.62.0")
- -v, --verbose count Print lots more stuff (repeat for more)
+ --ask-password Allow prompt for password for encrypted configuration (default true)
+ --auto-confirm If enabled, do not request console confirmation
+ --backup-dir string Make backups into hierarchy based in DIR
+ --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
+ --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
+ --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --ca-cert stringArray CA certificate used to verify servers
+ --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
+ --check-first Do all the checks before starting transfers
+ --checkers int Number of checkers to run in parallel (default 8)
+ -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
+ --client-cert string Client SSL certificate (PEM) for mutual TLS auth
+ --client-key string Client SSL private key (PEM) for mutual TLS auth
+ --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
+ --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ --config string Config file (default "$HOME/.config/rclone/rclone.conf")
+ --contimeout Duration Connect timeout (default 1m0s)
+ --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
+ --cpuprofile string Write cpu profile to file
+ --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
+ --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
+ --delete-after When synchronizing, delete files on destination after transferring (default)
+ --delete-before When synchronizing, delete files on destination before transferring
+ --delete-during When synchronizing, delete files during transfer
+ --delete-excluded Delete files on dest excluded from sync
+ --disable string Disable a comma separated list of features (use --disable help to see a list)
+ --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
+ --disable-http2 Disable HTTP/2 in the global transport
+ -n, --dry-run Do a trial run with no permanent changes
+ --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
+ --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
+ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
+ --dump-headers Dump HTTP headers - may contain sensitive info
+ --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
+ --fast-list Use recursive list if available; uses more memory but fewer transactions
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
+ --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
+ --header stringArray Set HTTP header for all transactions
+ --header-download stringArray Set HTTP header for download transactions
+ --header-upload stringArray Set HTTP header for upload transactions
+ --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
+ --ignore-case Ignore case in filters (case insensitive)
+ --ignore-case-sync Ignore case when synchronizing
+ --ignore-checksum Skip post copy check of checksums
+ --ignore-errors Delete even if there are I/O errors
+ --ignore-existing Skip all files that exist on destination
+ --ignore-size Ignore size when skipping use mod-time or checksum
+ -I, --ignore-times Don't skip files that match size and time - transfer all files
+ --immutable Do not modify files, fail if existing files have been modified
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --inplace Download directly to destination file instead of atomic download to temp/rename
+ -i, --interactive Enable interactive mode
+ --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
+ --log-file string Log everything to this file
+ --log-format string Comma separated list of log format options (default "date,time")
+ --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
+ --log-systemd Activate systemd integration for the logger
+ --low-level-retries int Number of low level retries to do (default 10)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
+ --max-delete int When synchronizing, limit the number of deletes (default -1)
+ --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
+ --max-transfer SizeSuffix Maximum size of data to transfer (default off)
+ --memprofile string Write memory profile to file
+ -M, --metadata If set, preserve metadata when copying objects
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --metadata-set stringArray Add metadata key=value when uploading
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+ --modify-window Duration Max time diff to be considered the same (default 1ns)
+ --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
+ --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
+ --multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
+ --no-check-certificate Do not verify the server SSL certificate (insecure)
+ --no-check-dest Don't check the destination, copy regardless
+ --no-console Hide console window (supported on Windows only)
+ --no-gzip-encoding Don't set Accept-Encoding: gzip
+ --no-traverse Don't traverse destination file system on copy
+ --no-unicode-normalization Don't normalize unicode characters in filenames
+ --no-update-modtime Don't update destination mod-time if files identical
+ --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
+ --password-command SpaceSepList Command for supplying password for encrypted configuration
+ -P, --progress Show progress during transfer
+ --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
+ -q, --quiet Print as little stuff as possible
+ --rc Enable the remote control server
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-allow-origin string Set the allowed origin for CORS
+ --rc-baseurl string Prefix for URLs - leave blank for root
+ --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --rc-client-ca string Client certificate authority to verify clients with
+ --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-files string Path to local files to serve on the HTTP server
+ --rc-htpasswd string A htpasswd file - if not provided no authentication is done
+ --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
+ --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
+ --rc-key string TLS PEM Private key
+ --rc-max-header-bytes int Maximum size of request header (default 4096)
+ --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --rc-no-auth Don't require auth for certain methods
+ --rc-pass string Password for authentication
+ --rc-realm string Realm for authentication
+ --rc-salt string Password hashing salt (default "dlPL2MqE")
+ --rc-serve Enable the serving of remote objects
+ --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --rc-template string User-specified template
+ --rc-user string User name for authentication
+ --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
+ --rc-web-gui Launch WebGUI on localhost
+ --rc-web-gui-force-update Force update to latest version of web gui
+ --rc-web-gui-no-open-browser Don't open the browser automatically
+ --rc-web-gui-update Check and update to latest version of web gui
+ --refresh-times Refresh the modtime of remote files
+ --retries int Retry operations this many times if they fail (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
+ --size-only Skip based on size only, not mod-time or checksum
+ --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
+ --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
+ --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
+ --stats-one-line Make the stats fit on one line
+ --stats-one-line-date Enable --stats-one-line and add current date/time prefix
+ --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
+ --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
+ --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+ --suffix string Suffix to add to changed files
+ --suffix-keep-extension Preserve the extension when using --suffix
+ --syslog Use Syslog for logging
+ --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
+ --temp-dir string Directory rclone will use for temporary files (default "/tmp")
+ --timeout Duration IO idle timeout (default 5m0s)
+ --tpslimit float Limit HTTP transactions per second to this
+ --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
+ --track-renames When synchronizing, track file renames and do a server-side move if possible
+ --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
+ --transfers int Number of file transfers to run in parallel (default 4)
+ -u, --update Skip files that are newer on the destination
+ --use-cookies Enable session cookiejar
+ --use-json-log Use json log format
+ --use-mmap Use mmap allocator (see docs)
+ --use-server-modtime Use server modified time instead of object metadata
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
+ -v, --verbose count Print lots more stuff (repeat for more)
Backend Flags
These flags are available for every command. They control the backends and may be set in the config file.
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
- --alias-remote string Remote or path to alias
- --azureblob-access-tier string Access tier of blob: hot, cool or archive
- --azureblob-account string Azure Storage Account Name
- --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
- --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
- --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
- --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
- --azureblob-client-id string The ID of the client in use
- --azureblob-client-secret string One of the service principal's client secrets
- --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
- --azureblob-disable-checksum Don't store MD5 checksum with object metadata
- --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
- --azureblob-endpoint string Endpoint for the service
- --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
- --azureblob-key string Storage Account Shared Key
- --azureblob-list-chunk int Size of blob list (default 5000)
- --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
- --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-no-check-container If set, don't attempt to check the container exists or create it
- --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
- --azureblob-password string The user's password (obscured)
- --azureblob-public-access string Public access level of a container: blob or container
- --azureblob-sas-url string SAS URL for container level access only
- --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
- --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
- --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
- --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
- --azureblob-use-emulator Uses local storage emulator if provided as 'true'
- --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
- --azureblob-username string User name (usually an email address)
- --b2-account string Account ID or Application Key ID
- --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
- --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
- --b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
- --b2-download-url string Custom endpoint for downloads
- --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --b2-endpoint string Endpoint for the service
- --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
- --b2-key string Application Key
- --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
- --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --b2-version-at Time Show file versions as they were at the specified time (default off)
- --b2-versions Include old versions in directory listings
- --box-access-token string Box App Primary Access Token
- --box-auth-url string Auth server URL
- --box-box-config-file string Box App config.json location
- --box-box-sub-type string (default "user")
- --box-client-id string OAuth Client Id
- --box-client-secret string OAuth Client Secret
- --box-commit-retries int Max number of times to try committing a multipart file (default 100)
- --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
- --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
- --box-owned-by string Only show items owned by the login (email address) passed in
- --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
- --box-token string OAuth Access Token as a JSON blob
- --box-token-url string Token server url
- --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
- --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
- --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
- --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
- --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
- --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
- --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
- --cache-db-purge Clear all the cached data for this remote on start
- --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
- --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
- --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
- --cache-plex-password string The password of the Plex user (obscured)
- --cache-plex-url string The URL of the Plex server
- --cache-plex-username string The username of the Plex user
- --cache-read-retries int How many times to retry a read from a cache storage (default 10)
- --cache-remote string Remote to cache
- --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
- --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
- --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
- --cache-workers int How many workers should run in parallel to download chunks (default 4)
- --cache-writes Cache file data on writes through the FS
- --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
- --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
- --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
- --chunker-remote string Remote to chunk/unchunk
- --combine-upstreams SpaceSepList Upstreams for combining
- --compress-level int GZIP compression level (-2 to 9) (default -1)
- --compress-mode string Compression mode (default "gzip")
- --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
- --compress-remote string Remote to compress
- -L, --copy-links Follow symlinks and copy the pointed to item
- --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
- --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
- --crypt-filename-encryption string How to encrypt the filenames (default "standard")
- --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
- --crypt-password string Password or pass phrase for encryption (obscured)
- --crypt-password2 string Password or pass phrase for salt (obscured)
- --crypt-remote string Remote to encrypt/decrypt
- --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
- --crypt-show-mapping For all files listed show how the names encrypt
- --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
- --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
- --drive-auth-owner-only Only consider files owned by the authenticated user
- --drive-auth-url string Auth server URL
- --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
- --drive-client-id string Google Application Client Id
- --drive-client-secret string OAuth Client Secret
- --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
- --drive-disable-http2 Disable drive using http2 (default true)
- --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
- --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
- --drive-formats string Deprecated: See export_formats
- --drive-impersonate string Impersonate this user when using a service account
- --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
- --drive-keep-revision-forever Keep new head revision of each file forever
- --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
- --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
- --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
- --drive-resource-key string Resource key for accessing a link-shared file
- --drive-root-folder-id string ID of the root folder
- --drive-scope string Scope that rclone should use when requesting access from drive
- --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
- --drive-service-account-credentials string Service Account Credentials JSON blob
- --drive-service-account-file string Service Account Credentials JSON file path
- --drive-shared-with-me Only show files that are shared with me
- --drive-size-as-quota Show sizes as storage quota usage, not actual size
- --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
- --drive-skip-dangling-shortcuts If set skip dangling shortcut files
- --drive-skip-gdocs Skip google documents in all listings
- --drive-skip-shortcuts If set skip shortcut files
- --drive-starred-only Only show files that are starred
- --drive-stop-on-download-limit Make download limit errors be fatal
- --drive-stop-on-upload-limit Make upload limit errors be fatal
- --drive-team-drive string ID of the Shared Drive (Team Drive)
- --drive-token string OAuth Access Token as a JSON blob
- --drive-token-url string Token server url
- --drive-trashed-only Only show files that are in the trash
- --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
- --drive-use-created-date Use file created date instead of modified date
- --drive-use-shared-date Use date file was shared instead of modified date
- --drive-use-trash Send files to the trash instead of deleting permanently (default true)
- --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download (default off)
- --dropbox-auth-url string Auth server URL
- --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
- --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
- --dropbox-batch-size int Max number of files in upload batch
- --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
- --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
- --dropbox-client-id string OAuth Client Id
- --dropbox-client-secret string OAuth Client Secret
- --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
- --dropbox-impersonate string Impersonate this user when using a business account
- --dropbox-shared-files Instructs rclone to work on individual shared files
- --dropbox-shared-folders Instructs rclone to work on shared folders
- --dropbox-token string OAuth Access Token as a JSON blob
- --dropbox-token-url string Token server url
- --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
- --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
- --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
- --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
- --fichier-shared-folder string If you want to download a shared folder, add this parameter
- --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --filefabric-permanent-token string Permanent Authentication Token
- --filefabric-root-folder-id string ID of the root folder
- --filefabric-token string Session Token
- --filefabric-token-expiry string Token expiry time
- --filefabric-url string URL of the Enterprise File Fabric to connect to
- --filefabric-version string Version read from the file fabric
- --ftp-ask-password Allow asking for FTP password when needed
- --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
- --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
- --ftp-disable-epsv Disable using EPSV even if server advertises support
- --ftp-disable-mlsd Disable using MLSD even if server advertises support
- --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
- --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
- --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
- --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
- --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
- --ftp-host string FTP host to connect to
- --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --ftp-no-check-certificate Do not verify the TLS certificate of the server
- --ftp-pass string FTP password (obscured)
- --ftp-port int FTP port number (default 21)
- --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
- --ftp-tls Use Implicit FTPS (FTP over TLS)
- --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
- --ftp-user string FTP username (default "$USER")
- --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
- --gcs-anonymous Access public buckets and objects without credentials
- --gcs-auth-url string Auth server URL
- --gcs-bucket-acl string Access Control List for new buckets
- --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
- --gcs-client-id string OAuth Client Id
- --gcs-client-secret string OAuth Client Secret
- --gcs-decompress If set this will decompress gzip encoded objects
- --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gcs-endpoint string Endpoint for the service
- --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
- --gcs-location string Location for the newly created buckets
- --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --gcs-object-acl string Access Control List for new objects
- --gcs-project-number string Project number
- --gcs-service-account-file string Service Account Credentials JSON file path
- --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
- --gcs-token string OAuth Access Token as a JSON blob
- --gcs-token-url string Token server url
- --gphotos-auth-url string Auth server URL
- --gphotos-client-id string OAuth Client Id
- --gphotos-client-secret string OAuth Client Secret
- --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gphotos-include-archived Also view and download archived media
- --gphotos-read-only Set to make the Google Photos backend read only
- --gphotos-read-size Set to read the size of media items
- --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
- --gphotos-token string OAuth Access Token as a JSON blob
- --gphotos-token-url string Token server url
- --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
- --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
- --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
- --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
- --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
- --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
- --hdfs-namenode string Hadoop name node and port
- --hdfs-service-principal-name string Kerberos service principal name for the namenode
- --hdfs-username string Hadoop user name
- --hidrive-auth-url string Auth server URL
- --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
- --hidrive-client-id string OAuth Client Id
- --hidrive-client-secret string OAuth Client Secret
- --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
- --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
- --hidrive-root-prefix string The root/parent folder for all paths (default "/")
- --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
- --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
- --hidrive-token string OAuth Access Token as a JSON blob
- --hidrive-token-url string Token server url
- --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
- --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
- --http-headers CommaSepList Set HTTP headers for all transactions
- --http-no-head Don't use HEAD requests
- --http-no-slash Set this if the site doesn't end directories with /
- --http-url string URL of HTTP host to connect to
- --internetarchive-access-key-id string IAS3 Access Key
- --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
- --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
- --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
- --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
- --internetarchive-secret-access-key string IAS3 Secret Key (password)
- --internetarchive-wait-archive Duration Timeout for waiting for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
- --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
- --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
- --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
- --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
- --jottacloud-trashed-only Only show files that are in the trash
- --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
- --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --koofr-endpoint string The Koofr API endpoint to use
- --koofr-mountid string Mount ID of the mount to use
- --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
- --koofr-provider string Choose your storage provider
- --koofr-setmtime Does the backend support setting modification time (default true)
- --koofr-user string Your user name
- -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
- --local-case-insensitive Force the filesystem to report itself as case insensitive
- --local-case-sensitive Force the filesystem to report itself as case sensitive
- --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --local-no-check-updated Don't check to see if the files change during upload
- --local-no-preallocate Disable preallocation of disk space for transferred files
- --local-no-set-modtime Disable setting modtime
- --local-no-sparse Disable sparse files for multi-thread downloads
- --local-nounc Disable UNC (long path names) conversion on Windows
- --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
- --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
- --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
- --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --mailru-pass string Password (obscured)
- --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
- --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
- --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
- --mailru-speedup-max-memory SizeSuffix Files larger than this size will always be hashed on disk (default 32Mi)
- --mailru-user string User name (usually email)
- --mega-debug Output more debug from Mega
- --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --mega-hard-delete Delete files permanently rather than putting them into the trash
- --mega-pass string Password (obscured)
- --mega-use-https Use HTTPS for transfers
- --mega-user string User name
- --netstorage-account string Set the NetStorage account name
- --netstorage-host string Domain+path of NetStorage host to connect to
- --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
- --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
- -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
- --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
- --onedrive-auth-url string Auth server URL
- --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
- --onedrive-client-id string OAuth Client Id
- --onedrive-client-secret string OAuth Client Secret
- --onedrive-drive-id string The ID of the drive to use
- --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
- --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
- --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
- --onedrive-link-password string Set the password for links created by the link command
- --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
- --onedrive-link-type string Set the type of the links created by the link command (default "view")
- --onedrive-list-chunk int Size of listing chunk (default 1000)
- --onedrive-no-versions Remove all versions on modifying operations
- --onedrive-region string Choose national cloud region for OneDrive (default "global")
- --onedrive-root-folder-id string ID of the root folder
- --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
- --onedrive-token string OAuth Access Token as a JSON blob
- --onedrive-token-url string Token server url
- --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --oos-compartment string Object storage compartment OCID
- --oos-config-file string Path to OCI config file (default "~/.oci/config")
- --oos-config-profile string Profile name inside the oci config file (default "Default")
- --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --oos-copy-timeout Duration Timeout for copy (default 1m0s)
- --oos-disable-checksum Don't store MD5 checksum with object metadata
- --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --oos-endpoint string Endpoint for Object storage API
- --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --oos-namespace string Object storage namespace
- --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --oos-provider string Choose your Auth Provider (default "env_auth")
- --oos-region string Object storage Region
- --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
- --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
- --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the
- --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
- --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
- --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
- --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
- --opendrive-password string Password (obscured)
- --opendrive-username string Username
- --pcloud-auth-url string Auth server URL
- --pcloud-client-id string OAuth Client Id
- --pcloud-client-secret string OAuth Client Secret
- --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
- --pcloud-password string Your pcloud password (obscured)
- --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
- --pcloud-token string OAuth Access Token as a JSON blob
- --pcloud-token-url string Token server url
- --pcloud-username string Your pcloud username
- --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --qingstor-access-key-id string QingStor Access Key ID
- --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
- --qingstor-connection-retries int Number of connection retries (default 3)
- --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
- --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
- --qingstor-env-auth Get QingStor credentials from runtime
- --qingstor-secret-access-key string QingStor Secret Access Key (password)
- --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
- --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --qingstor-zone string Zone to connect to
- --s3-access-key-id string AWS Access Key ID
- --s3-acl string Canned ACL used when creating buckets and storing or copying objects
- --s3-bucket-acl string Canned ACL used when creating buckets
- --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --s3-decompress If set this will decompress gzip encoded objects
- --s3-disable-checksum Don't store MD5 checksum with object metadata
- --s3-disable-http2 Disable usage of http2 for S3 backends
- --s3-download-url string Custom endpoint for downloads
- --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --s3-endpoint string Endpoint for S3 API
- --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
- --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
- --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
- --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
- --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
- --s3-location-constraint string Location constraint - must be set to match the Region
- --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
- --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
- --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --s3-no-head If set, don't HEAD uploaded objects to check integrity
- --s3-no-head-object If set, do not do HEAD before GET when getting objects
- --s3-no-system-metadata Suppress setting and reading of system metadata
- --s3-profile string Profile to use in the shared credentials file
- --s3-provider string Choose your S3 provider
- --s3-region string Region to connect to
- --s3-requester-pays Enables requester pays option when interacting with S3 bucket
- --s3-secret-access-key string AWS Secret Access Key (password)
- --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
- --s3-session-token string An AWS session token
- --s3-shared-credentials-file string Path to the shared credentials file
- --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
- --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
- --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
- --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
- --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
- --s3-storage-class string The storage class to use when storing new objects in S3
- --s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
- --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
- --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
- --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
- --s3-v2-auth If true use v2 authentication
- --s3-version-at Time Show file versions as they were at the specified time (default off)
- --s3-versions Include old versions in directory listings
- --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
- --seafile-create-library Should rclone create a library if it doesn't exist
- --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
- --seafile-library string Name of the library
- --seafile-library-key string Library password (for encrypted libraries only) (obscured)
- --seafile-pass string Password (obscured)
- --seafile-url string URL of seafile host to connect to
- --seafile-user string User name (usually email address)
- --sftp-ask-password Allow asking for SFTP password when needed
- --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
- --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
- --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
- --sftp-disable-concurrent-reads If set don't use concurrent reads
- --sftp-disable-concurrent-writes If set don't use concurrent writes
- --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
- --sftp-host string SSH host to connect to
- --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
- --sftp-key-file string Path to PEM-encoded private key file
- --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
- --sftp-key-pem string Raw PEM-encoded private key
- --sftp-key-use-agent When set forces the usage of the ssh-agent
- --sftp-known-hosts-file string Optional path to known_hosts file
- --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
- --sftp-md5sum-command string The command used to read md5 hashes
- --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
- --sftp-path-override string Override path used by SSH shell commands
- --sftp-port int SSH port number (default 22)
- --sftp-pubkey-file string Optional path to public key file
- --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
- --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
- --sftp-set-modtime Set the modified time on the remote if set (default true)
- --sftp-sha1sum-command string The command used to read sha1 hashes
- --sftp-shell-type string The type of SSH shell on remote server, if any
- --sftp-skip-links Set to skip any symlinks and any other non-regular files
- --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
- --sftp-use-fstat If set use fstat instead of stat
- --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
- --sftp-user string SSH username (default "$USER")
- --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
- --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --sharefile-endpoint string Endpoint for API calls
- --sharefile-root-folder-id string ID of the root folder
- --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
- --sia-api-password string Sia Daemon API Password (obscured)
- --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
- --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
- --sia-user-agent string Siad User Agent (default "Sia-Agent")
- --skip-links Don't warn about skipped symlinks
- --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
- --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
- --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
- --smb-host string SMB server hostname to connect to
- --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --smb-pass string SMB password (obscured)
- --smb-port int SMB port number (default 445)
- --smb-spn string Service principal name
- --smb-user string SMB username (default "$USER")
- --storj-access-grant string Access grant
- --storj-api-key string API key
- --storj-passphrase string Encryption passphrase
- --storj-provider string Choose an authentication method (default "existing")
- --storj-satellite-address string Satellite address (default "us1.storj.io")
- --sugarsync-access-key-id string Sugarsync Access Key ID
- --sugarsync-app-id string Sugarsync App ID
- --sugarsync-authorization string Sugarsync authorization
- --sugarsync-authorization-expiry string Sugarsync authorization expiry
- --sugarsync-deleted-id string Sugarsync deleted folder id
- --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
- --sugarsync-hard-delete Permanently delete files if true
- --sugarsync-private-access-key string Sugarsync Private Access Key
- --sugarsync-refresh-token string Sugarsync refresh token
- --sugarsync-root-id string Sugarsync root id
- --sugarsync-user string Sugarsync user
- --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
- --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
- --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
- --swift-auth string Authentication URL for server (OS_AUTH_URL)
- --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
- --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
- --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
- --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
- --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
- --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
- --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
- --swift-key string API key or password (OS_PASSWORD)
- --swift-leave-parts-on-error If true avoid calling abort upload on a failure
- --swift-no-chunk Don't chunk files during streaming upload
- --swift-no-large-objects Disable support for static and dynamic large objects
- --swift-region string Region name - optional (OS_REGION_NAME)
- --swift-storage-policy string The storage policy to use when creating a new container
- --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
- --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
- --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
- --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
- --swift-user string User name to log in (OS_USERNAME)
- --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
- --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
- --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
- --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
- --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
- --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
- --union-upstreams string List of space separated upstreams
- --uptobox-access-token string Your access token
- --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
- --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
- --webdav-bearer-token-command string Command to run to get a bearer token
- --webdav-encoding string The encoding for the backend
- --webdav-headers CommaSepList Set HTTP headers for all transactions
- --webdav-pass string Password (obscured)
- --webdav-url string URL of http host to connect to
- --webdav-user string User name
- --webdav-vendor string Name of the WebDAV site/service/software you are using
- --yandex-auth-url string Auth server URL
- --yandex-client-id string OAuth Client Id
- --yandex-client-secret string OAuth Client Secret
- --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --yandex-hard-delete Delete files permanently rather than putting them into the trash
- --yandex-token string OAuth Access Token as a JSON blob
- --yandex-token-url string Token server url
- --zoho-auth-url string Auth server URL
- --zoho-client-id string OAuth Client Id
- --zoho-client-secret string OAuth Client Secret
- --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
- --zoho-region string Zoho region to connect to
- --zoho-token string OAuth Access Token as a JSON blob
- --zoho-token-url string Token server url
+ --acd-auth-url string Auth server URL
+ --acd-client-id string OAuth Client Id
+ --acd-client-secret string OAuth Client Secret
+ --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
+ --acd-token string OAuth Access Token as a JSON blob
+ --acd-token-url string Token server url
+ --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-remote string Remote or path to alias
+ --azureblob-access-tier string Access tier of blob: hot, cool or archive
+ --azureblob-account string Azure Storage Account Name
+ --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
+ --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
+ --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
+ --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
+ --azureblob-client-id string The ID of the client in use
+ --azureblob-client-secret string One of the service principal's client secrets
+ --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --azureblob-disable-checksum Don't store MD5 checksum with object metadata
+ --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+ --azureblob-endpoint string Endpoint for the service
+ --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
+ --azureblob-key string Storage Account Shared Key
+ --azureblob-list-chunk int Size of blob list (default 5000)
+ --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
+ --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-no-check-container If set, don't attempt to check the container exists or create it
+ --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
+ --azureblob-password string The user's password (obscured)
+ --azureblob-public-access string Public access level of a container: blob or container
+ --azureblob-sas-url string SAS URL for container level access only
+ --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
+ --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
+ --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
+ --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
+ --azureblob-use-emulator Uses local storage emulator if provided as 'true'
+ --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
+ --azureblob-username string User name (usually an email address)
+ --b2-account string Account ID or Application Key ID
+ --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
+ --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-disable-checksum Disable checksums for large (> upload cutoff) files
+ --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-url string Custom endpoint for downloads
+ --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --b2-endpoint string Endpoint for the service
+ --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
+ --b2-key string Application Key
+ --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
+ --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --b2-version-at Time Show file versions as they were at the specified time (default off)
+ --b2-versions Include old versions in directory listings
+ --box-access-token string Box App Primary Access Token
+ --box-auth-url string Auth server URL
+ --box-box-config-file string Box App config.json location
+ --box-box-sub-type string (default "user")
+ --box-client-id string OAuth Client Id
+ --box-client-secret string OAuth Client Secret
+ --box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+ --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
+ --box-owned-by string Only show items owned by the login (email address) passed in
+ --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
+ --box-token string OAuth Access Token as a JSON blob
+ --box-token-url string Token server url
+ --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+ --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
+ --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
+ --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
+ --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
+ --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
+ --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
+ --cache-db-purge Clear all the cached data for this remote on start
+ --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
+ --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
+ --cache-plex-password string The password of the Plex user (obscured)
+ --cache-plex-url string The URL of the Plex server
+ --cache-plex-username string The username of the Plex user
+ --cache-read-retries int How many times to retry a read from a cache storage (default 10)
+ --cache-remote string Remote to cache
+ --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
+ --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
+ --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
+ --cache-workers int How many workers should run in parallel to download chunks (default 4)
+ --cache-writes Cache file data on writes through the FS
+ --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
+ --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
+ --chunker-remote string Remote to chunk/unchunk
+ --combine-upstreams SpaceSepList Upstreams for combining
+ --compress-level int GZIP compression level (-2 to 9) (default -1)
+ --compress-mode string Compression mode (default "gzip")
+ --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
+ --compress-remote string Remote to compress
+ -L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
+ --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
+ --crypt-filename-encryption string How to encrypt the filenames (default "standard")
+ --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
+ --crypt-pass-bad-blocks If set this will pass bad blocks through as all 0
+ --crypt-password string Password or pass phrase for encryption (obscured)
+ --crypt-password2 string Password or pass phrase for salt (obscured)
+ --crypt-remote string Remote to encrypt/decrypt
+ --crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
+ --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+ --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
+ --drive-auth-owner-only Only consider files owned by the authenticated user
+ --drive-auth-url string Auth server URL
+ --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
+ --drive-client-id string Google Application Client Id
+ --drive-client-secret string OAuth Client Secret
+ --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-disable-http2 Disable drive using http2 (default true)
+ --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
+ --drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
+ --drive-formats string Deprecated: See export_formats
+ --drive-impersonate string Impersonate this user when using a service account
+ --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
+ --drive-keep-revision-forever Keep new head revision of each file forever
+ --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
+ --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
+ --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
+ --drive-resource-key string Resource key for accessing a link-shared file
+ --drive-root-folder-id string ID of the root folder
+ --drive-scope string Scope that rclone should use when requesting access from drive
+ --drive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --drive-service-account-credentials string Service Account Credentials JSON blob
+ --drive-service-account-file string Service Account Credentials JSON file path
+ --drive-shared-with-me Only show files that are shared with me
+ --drive-size-as-quota Show sizes as storage quota usage, not actual size
+ --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
+ --drive-skip-dangling-shortcuts If set skip dangling shortcut files
+ --drive-skip-gdocs Skip google documents in all listings
+ --drive-skip-shortcuts If set skip shortcut files
+ --drive-starred-only Only show files that are starred
+ --drive-stop-on-download-limit Make download limit errors be fatal
+ --drive-stop-on-upload-limit Make upload limit errors be fatal
+ --drive-team-drive string ID of the Shared Drive (Team Drive)
+ --drive-token string OAuth Access Token as a JSON blob
+ --drive-token-url string Token server url
+ --drive-trashed-only Only show files that are in the trash
+ --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
+ --drive-use-created-date Use file created date instead of modified date
+ --drive-use-shared-date Use date file was shared instead of modified date
+ --drive-use-trash Send files to the trash instead of deleting permanently (default true)
+ --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download (default off)
+ --dropbox-auth-url string Auth server URL
+ --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
+ --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
+ --dropbox-batch-size int Max number of files in upload batch
+ --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
+ --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
+ --dropbox-client-id string OAuth Client Id
+ --dropbox-client-secret string OAuth Client Secret
+ --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+ --dropbox-impersonate string Impersonate this user when using a business account
+ --dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --dropbox-shared-files Instructs rclone to work on individual shared files
+ --dropbox-shared-folders Instructs rclone to work on shared folders
+ --dropbox-token string OAuth Access Token as a JSON blob
+ --dropbox-token-url string Token server url
+ --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
+ --fichier-cdn Set if you wish to use CDN download links
+ --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+ --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
+ --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
+ --fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --filefabric-permanent-token string Permanent Authentication Token
+ --filefabric-root-folder-id string ID of the root folder
+ --filefabric-token string Session Token
+ --filefabric-token-expiry string Token expiry time
+ --filefabric-url string URL of the Enterprise File Fabric to connect to
+ --filefabric-version string Version read from the file fabric
+ --ftp-ask-password Allow asking for FTP password when needed
+ --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
+ --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-disable-epsv Disable using EPSV even if server advertises support
+ --ftp-disable-mlsd Disable using MLSD even if server advertises support
+ --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
+ --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
+ --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
+ --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
+ --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
+ --ftp-host string FTP host to connect to
+ --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --ftp-no-check-certificate Do not verify the TLS certificate of the server
+ --ftp-pass string FTP password (obscured)
+ --ftp-port int FTP port number (default 21)
+ --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
+ --ftp-tls Use Implicit FTPS (FTP over TLS)
+ --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
+ --ftp-user string FTP username (default "$USER")
+ --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
+ --gcs-anonymous Access public buckets and objects without credentials
+ --gcs-auth-url string Auth server URL
+ --gcs-bucket-acl string Access Control List for new buckets
+ --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
+ --gcs-client-id string OAuth Client Id
+ --gcs-client-secret string OAuth Client Secret
+ --gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gcs-endpoint string Endpoint for the service
+ --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --gcs-location string Location for the newly created buckets
+ --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --gcs-object-acl string Access Control List for new objects
+ --gcs-project-number string Project number
+ --gcs-service-account-file string Service Account Credentials JSON file path
+ --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
+ --gcs-token string OAuth Access Token as a JSON blob
+ --gcs-token-url string Token server url
+ --gcs-user-project string User project
+ --gphotos-auth-url string Auth server URL
+ --gphotos-client-id string OAuth Client Id
+ --gphotos-client-secret string OAuth Client Secret
+ --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gphotos-include-archived Also view and download archived media
+ --gphotos-read-only Set to make the Google Photos backend read only
+ --gphotos-read-size Set to read the size of media items
+ --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
+ --gphotos-token string OAuth Access Token as a JSON blob
+ --gphotos-token-url string Token server url
+ --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
+ --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+ --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
+ --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+ --hdfs-namenode string Hadoop name node and port
+ --hdfs-service-principal-name string Kerberos service principal name for the namenode
+ --hdfs-username string Hadoop user name
+ --hidrive-auth-url string Auth server URL
+ --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
+ --hidrive-client-id string OAuth Client Id
+ --hidrive-client-secret string OAuth Client Secret
+ --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
+ --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
+ --hidrive-root-prefix string The root/parent folder for all paths (default "/")
+ --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
+ --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
+ --hidrive-token string OAuth Access Token as a JSON blob
+ --hidrive-token-url string Token server url
+ --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
+ --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-headers CommaSepList Set HTTP headers for all transactions
+ --http-no-head Don't use HEAD requests
+ --http-no-slash Set this if the site doesn't end directories with /
+ --http-url string URL of HTTP host to connect to
+ --internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
+ --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
+ --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
+ --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
+ --internetarchive-secret-access-key string IAS3 Secret Key (password)
+ --internetarchive-wait-archive Duration Timeout for waiting for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
+ --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+ --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
+ --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+ --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
+ --jottacloud-trashed-only Only show files that are in the trash
+ --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --koofr-endpoint string The Koofr API endpoint to use
+ --koofr-mountid string Mount ID of the mount to use
+ --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
+ --koofr-provider string Choose your storage provider
+ --koofr-setmtime Does the backend support setting modification time (default true)
+ --koofr-user string Your user name
+ -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
+ --local-case-insensitive Force the filesystem to report itself as case insensitive
+ --local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --local-no-check-updated Don't check to see if the files change during upload
+ --local-no-preallocate Disable preallocation of disk space for transferred files
+ --local-no-set-modtime Disable setting modtime
+ --local-no-sparse Disable sparse files for multi-thread downloads
+ --local-nounc Disable UNC (long path names) conversion on Windows
+ --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
+ --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
+ --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
+ --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --mailru-pass string Password (obscured)
+ --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
+ --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
+ --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
+ --mailru-speedup-max-memory SizeSuffix Files larger than this size will always be hashed on disk (default 32Mi)
+ --mailru-user string User name (usually email)
+ --mega-debug Output more debug from Mega
+ --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --mega-hard-delete Delete files permanently rather than putting them into the trash
+ --mega-pass string Password (obscured)
+ --mega-use-https Use HTTPS for transfers
+ --mega-user string User name
+ --netstorage-account string Set the NetStorage account name
+ --netstorage-host string Domain+path of NetStorage host to connect to
+ --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
+ --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
+ -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
+ --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
+ --onedrive-auth-url string Auth server URL
+ --onedrive-av-override Allows download of files the server thinks have a virus
+ --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
+ --onedrive-client-id string OAuth Client Id
+ --onedrive-client-secret string OAuth Client Secret
+ --onedrive-drive-id string The ID of the drive to use
+ --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
+ --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
+ --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
+ --onedrive-link-password string Set the password for links created by the link command
+ --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
+ --onedrive-link-type string Set the type of the links created by the link command (default "view")
+ --onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-no-versions Remove all versions on modifying operations
+ --onedrive-region string Choose national cloud region for OneDrive (default "global")
+ --onedrive-root-folder-id string ID of the root folder
+ --onedrive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --onedrive-token string OAuth Access Token as a JSON blob
+ --onedrive-token-url string Token server url
+ --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --oos-compartment string Object storage compartment OCID
+ --oos-config-file string Path to OCI config file (default "~/.oci/config")
+ --oos-config-profile string Profile name inside the oci config file (default "Default")
+ --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-disable-checksum Don't store MD5 checksum with object metadata
+ --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --oos-endpoint string Endpoint for Object storage API
+ --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --oos-namespace string Object storage namespace
+ --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --oos-provider string Choose your Auth Provider (default "env_auth")
+ --oos-region string Object storage Region
+ --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
+ --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to encrypt or decrypt the data
+ --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated with the object
+ --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption key
+ --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the OCID of the master encryption key
+ --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
+ --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
+ --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+ --opendrive-password string Password (obscured)
+ --opendrive-username string Username
+ --pcloud-auth-url string Auth server URL
+ --pcloud-client-id string OAuth Client Id
+ --pcloud-client-secret string OAuth Client Secret
+ --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
+ --pcloud-password string Your pcloud password (obscured)
+ --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
+ --pcloud-token string OAuth Access Token as a JSON blob
+ --pcloud-token-url string Token server url
+ --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
+ --pikpak-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
+ --pikpak-pass string Pikpak password (obscured)
+ --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
+ --pikpak-trashed-only Only show files that are in the trash
+ --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
+ --pikpak-user string Pikpak username
+ --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --qingstor-access-key-id string QingStor Access Key ID
+ --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
+ --qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
+ --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
+ --qingstor-env-auth Get QingStor credentials from runtime
+ --qingstor-secret-access-key string QingStor Secret Access Key (password)
+ --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
+ --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --qingstor-zone string Zone to connect to
+ --s3-access-key-id string AWS Access Key ID
+ --s3-acl string Canned ACL used when creating buckets and storing or copying objects
+ --s3-bucket-acl string Canned ACL used when creating buckets
+ --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --s3-decompress If set this will decompress gzip encoded objects
+ --s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --s3-disable-checksum Don't store MD5 checksum with object metadata
+ --s3-disable-http2 Disable usage of http2 for S3 backends
+ --s3-download-url string Custom endpoint for downloads
+ --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --s3-endpoint string Endpoint for S3 API
+ --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
+ --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
+ --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+ --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
+ --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
+ --s3-location-constraint string Location constraint - must be set to match the Region
+ --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
+ --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
+ --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --s3-no-head If set, don't HEAD uploaded objects to check integrity
+ --s3-no-head-object If set, do not do HEAD before GET when getting objects
+ --s3-no-system-metadata Suppress setting and reading of system metadata
+ --s3-profile string Profile to use in the shared credentials file
+ --s3-provider string Choose your S3 provider
+ --s3-region string Region to connect to
+ --s3-requester-pays Enables requester pays option when interacting with S3 bucket
+ --s3-secret-access-key string AWS Secret Access Key (password)
+ --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
+ --s3-session-token string An AWS session token
+ --s3-shared-credentials-file string Path to the shared credentials file
+ --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+ --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
+ --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
+ --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+ --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
+ --s3-storage-class string The storage class to use when storing new objects in S3
+ --s3-sts-endpoint string Endpoint for STS
+ --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
+ --s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
+ --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
+ --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
+ --s3-v2-auth If true use v2 authentication
+ --s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-versions Include old versions in directory listings
+ --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
+ --seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+ --seafile-library string Name of the library
+ --seafile-library-key string Library password (for encrypted libraries only) (obscured)
+ --seafile-pass string Password (obscured)
+ --seafile-url string URL of seafile host to connect to
+ --seafile-user string User name (usually email address)
+ --sftp-ask-password Allow asking for SFTP password when needed
+ --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
+ --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
+ --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
+ --sftp-disable-concurrent-reads If set don't use concurrent reads
+ --sftp-disable-concurrent-writes If set don't use concurrent writes
+ --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
+ --sftp-host string SSH host to connect to
+ --sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
+ --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
+ --sftp-key-file string Path to PEM-encoded private key file
+ --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
+ --sftp-key-pem string Raw PEM-encoded private key
+ --sftp-key-use-agent When set forces the usage of the ssh-agent
+ --sftp-known-hosts-file string Optional path to known_hosts file
+ --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
+ --sftp-md5sum-command string The command used to read md5 hashes
+ --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
+ --sftp-path-override string Override path used by SSH shell commands
+ --sftp-port int SSH port number (default 22)
+ --sftp-pubkey-file string Optional path to public key file
+ --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
+ --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
+ --sftp-set-modtime Set the modified time on the remote if set (default true)
+ --sftp-sha1sum-command string The command used to read sha1 hashes
+ --sftp-shell-type string The type of SSH shell on remote server, if any
+ --sftp-skip-links Set to skip any symlinks and any other non regular files
+ --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
+ --sftp-use-fstat If set use fstat instead of stat
+ --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
+ --sftp-user string SSH username (default "$USER")
+ --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
+ --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --sharefile-endpoint string Endpoint for API calls
+ --sharefile-root-folder-id string ID of the root folder
+ --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
+ --sia-api-password string Sia Daemon API Password (obscured)
+ --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
+ --sia-user-agent string Siad User Agent (default "Sia-Agent")
+ --skip-links Don't warn about skipped symlinks
+ --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
+ --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
+ --smb-host string SMB server hostname to connect to
+ --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --smb-pass string SMB password (obscured)
+ --smb-port int SMB port number (default 445)
+ --smb-spn string Service principal name
+ --smb-user string SMB username (default "$USER")
+ --storj-access-grant string Access grant
+ --storj-api-key string API key
+ --storj-passphrase string Encryption passphrase
+ --storj-provider string Choose an authentication method (default "existing")
+ --storj-satellite-address string Satellite address (default "us1.storj.io")
+ --sugarsync-access-key-id string Sugarsync Access Key ID
+ --sugarsync-app-id string Sugarsync App ID
+ --sugarsync-authorization string Sugarsync authorization
+ --sugarsync-authorization-expiry string Sugarsync authorization expiry
+ --sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
+ --sugarsync-hard-delete Permanently delete files if true
+ --sugarsync-private-access-key string Sugarsync Private Access Key
+ --sugarsync-refresh-token string Sugarsync refresh token
+ --sugarsync-root-id string Sugarsync root id
+ --sugarsync-user string Sugarsync user
+ --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
+ --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
+ --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
+ --swift-auth string Authentication URL for server (OS_AUTH_URL)
+ --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
+ --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
+ --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
+ --swift-key string API key or password (OS_PASSWORD)
+ --swift-leave-parts-on-error If true avoid calling abort upload on a failure
+ --swift-no-chunk Don't chunk files during streaming upload
+ --swift-no-large-objects Disable support for static and dynamic large objects
+ --swift-region string Region name - optional (OS_REGION_NAME)
+ --swift-storage-policy string The storage policy to use when creating a new container
+ --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
+ --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ --swift-user string User name to log in (OS_USERNAME)
+ --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+ --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
+ --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
+ --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
+ --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
+ --union-upstreams string List of space separated upstreams
+ --uptobox-access-token string Your access token
+ --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+ --uptobox-private Set to make uploaded files private
+ --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
+ --webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-encoding string The encoding for the backend
+ --webdav-headers CommaSepList Set HTTP headers for all transactions
+ --webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --webdav-pass string Password (obscured)
+ --webdav-url string URL of http host to connect to
+ --webdav-user string User name
+ --webdav-vendor string Name of the WebDAV site/service/software you are using
+ --yandex-auth-url string Auth server URL
+ --yandex-client-id string OAuth Client Id
+ --yandex-client-secret string OAuth Client Secret
+ --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-token string OAuth Access Token as a JSON blob
+ --yandex-token-url string Token server url
+ --zoho-auth-url string Auth server URL
+ --zoho-client-id string OAuth Client Id
+ --zoho-client-secret string OAuth Client Secret
+ --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
+ --zoho-region string Zoho region to connect to
+ --zoho-token string OAuth Access Token as a JSON blob
+ --zoho-token-url string Token server url
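+Note that every backend flag above can also be set through its corresponding environment variable (RCLONE_ followed by the flag name upper-cased, with dashes replaced by underscores). As a purely illustrative sketch, assuming a remote named s3remote, these two invocations are equivalent:
+# flag form (remote name is hypothetical)
+rclone lsd s3remote: --s3-region eu-central-1
+# environment variable form
+RCLONE_S3_REGION=eu-central-1 rclone lsd s3remote: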
Docker Volume Plugin
Introduction
Docker 1.9 added support for creating named volumes via the command-line interface and mounting them in containers as a way to share data between them. Since Docker 1.10 you can create named volumes with Docker Compose, described in docker-compose.yml files, for use by container groups on a single host. As of Docker 1.12 volumes are supported by Docker Swarm (included with Docker Engine) and created from descriptions in swarm compose v3 files for use with swarm stacks across multiple cluster nodes.
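For orientation, a minimal sketch of the plain Docker workflow the plugin builds on (the volume and image names are illustrative, not part of the plugin itself):
# create a named volume and use it from a container
docker volume create mydata
docker run --rm -v mydata:/data busybox sh -c 'echo hello > /data/file.txt'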
@@ -9743,7 +9921,7 @@ rclone copy PATH2 PATH2 --filter "+ */" --filter "- **" --cr
# NOTICE: If you make changes to this file you MUST do a --resync run.
# Run with --dry-run to see what changes will be made.
-# Dropbox wont sync some files so filter them away here.
+# Dropbox won't sync some files so filter them away here.
# See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
- .dropbox.attr
- ~*.tmp
@@ -9933,7 +10111,7 @@ Options:
move-listings <prefix>
Similar to copy-listings
but removes the source
purge-children <dir>
This will delete all child files and purge all child subdirs under given directory but keep the parent intact. This behavior is important for tests with Google Drive because removing and re-creating the parent would change its ID.
delete-file <file>
Delete a single file.
-delete-glob <dir> <pattern>
Delete a group of files located one level deep in the given directory with names maching a given glob pattern.
+delete-glob <dir> <pattern>
Delete a group of files located one level deep in the given directory with names matching a given glob pattern.
touch-glob YYYY-MM-DD <dir> <pattern>
Change modification time on a group of files.
touch-copy YYYY-MM-DD <source-file> <dest-dir>
Change file modification time then copy it to destination.
copy-file <source-file> <dest-dir>
Copy a single file to given directory.
@@ -10188,6 +10366,15 @@ y/e/d> y
Type: string
Required: false
+--fichier-cdn
+Set if you wish to use CDN download links.
+Properties:
+
+- Config: cdn
+- Env Var: RCLONE_FICHIER_CDN
+- Type: bool
+- Default: false
+
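+For instance, fetching a file over CDN download links might look like this:
+# remote "fichier" and the file name are hypothetical
+rclone copy fichier:myfile /tmp/download --fichier-cdn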
--fichier-encoding
The encoding for the backend.
See the encoding section in the overview for more info.
@@ -10487,12 +10674,14 @@ y/e/d> y
Arvan Cloud Object Storage (AOS)
DigitalOcean Spaces
Dreamhost
+GCS
Huawei OBS
IBM COS S3
IDrive e2
IONOS Cloud
Liara Object Storage
Minio
+Petabox
Qiniu Cloud Object Storage (Kodo)
RackCorp Object Storage
Scaleway
@@ -10764,7 +10953,9 @@ y/e/d>
Setting this flag increases the chance for undetected upload failures.
Hashes
For small objects which weren't uploaded as multipart uploads (objects sized below --s3-upload-cutoff
if uploaded with rclone) rclone uses the ETag:
header as an MD5 checksum.
-However for objects which were uploaded as multipart uploads or with server side encryption (SSE-AWS or SSE-C) the ETag
header is no longer the MD5 sum of the data, so rclone adds an additional piece of metadata X-Amz-Meta-Md5chksum
which is a base64 encoded MD5 hash (in the same format as is required for Content-MD5
).
+However for objects which were uploaded as multipart uploads or with server side encryption (SSE-AWS or SSE-C) the ETag
header is no longer the MD5 sum of the data, so rclone adds an additional piece of metadata X-Amz-Meta-Md5chksum
which is a base64 encoded MD5 hash (in the same format as is required for Content-MD5
). You can use base64 -d and hexdump to check this value manually:
+echo 'VWTGdNx3LyXQDfA0e2Edxw==' | base64 -d | hexdump
+or you can use rclone check
to verify the hashes are OK.
For large objects, calculating this hash can take some time so the addition of this hash can be disabled with --s3-disable-checksum
. This will mean that these objects do not have an MD5 checksum.
Note that reading this from the object takes an additional HEAD
request as the metadata isn't returned in object listings.
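For reference, the value stored in X-Amz-Meta-Md5chksum can also be reproduced locally from the original file, since it is simply the base64 of the binary MD5; this sketch assumes a local copy named file.bin:
# produces the same base64 string as the stored metadata
openssl md5 -binary file.bin | base64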
Versions
@@ -10870,7 +11061,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
Secret Access Key: AWS_SECRET_ACCESS_KEY
or AWS_SECRET_KEY
Session Token: AWS_SESSION_TOKEN
(optional)
-Or, use a named profile:
+Or, use a named profile:
- Profile files are standard files used by AWS CLI tools
- By default it will use the profile file in your home directory (e.g.
~/.aws/credentials
on Unix-based systems) and the "default" profile; to change this, set these environment variables:
@@ -10943,7 +11134,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
As mentioned in the Hashes section, small files that are not uploaded as multipart use a different tag, causing the upload to fail. A simple solution is to set --s3-upload-cutoff 0
and force all the files to be uploaded as multipart.
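For example, forcing multipart uploads for everything might look like this:
# paths and remote name are illustrative
rclone copy --s3-upload-cutoff 0 /path/to/files remote:bucket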
Standard options
-Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
--s3-provider
Choose your S3 provider.
Properties:
@@ -10962,6 +11153,10 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
+- "ArvanCloud"
+
+- Arvan Cloud Object Storage (AOS)
+
- "Ceph"
- Ceph Object Storage
@@ -10974,10 +11169,6 @@ $ rclone -q --s3-versions ls s3:cleanup-test
-- "ArvanCloud"
-
-- Arvan Cloud Object Storage (AOS)
-
- "DigitalOcean"
- DigitalOcean Spaces
@@ -10986,6 +11177,10 @@ $ rclone -q --s3-versions ls s3:cleanup-test
+- "GCS"
+
+- Google Cloud Storage
+
- "HuaweiOBS"
- Huawei Object Storage Service
@@ -11018,6 +11213,10 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- Netease Object Storage (NOS)
+- "Petabox"
+
+- Petabox Object Storage
+
- "RackCorp"
- RackCorp Object Storage
@@ -11515,13 +11714,46 @@ $ rclone -q --s3-versions ls s3:cleanup-test
--s3-region
+Region where your bucket will be created and your data stored.
+Properties:
+
+- Config: region
+- Env Var: RCLONE_S3_REGION
+- Provider: Petabox
+- Type: string
+- Required: false
+- Examples:
+
+- "us-east-1"
+
+- "eu-central-1"
+
+- "ap-southeast-1"
+
+- Asia Pacific (Singapore)
+
+- "me-south-1"
+
+- "sa-east-1"
+
+- South America (São Paulo)
+
+
+
+--s3-region
Region to connect to.
Leave blank if you are using an S3 clone and you don't have a region.
Properties:
- Config: region
- Env Var: RCLONE_S3_REGION
-- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
+- Provider: !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
- Type: string
- Required: false
- Examples:
@@ -11694,12 +11926,12 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- Required: false
- Examples:
-- "s3.ir-thr-at1.arvanstorage.com"
+
- "s3.ir-thr-at1.arvanstorage.ir"
- The default endpoint - a good choice if you are unsure.
-- Tehran Iran (Asiatech)
+- Tehran Iran (Simin)
-- "s3.ir-tbz-sh1.arvanstorage.com"
+
- "s3.ir-tbz-sh1.arvanstorage.ir"
@@ -11994,6 +12226,44 @@ $ rclone -q --s3-versions ls s3:cleanup-test
--s3-endpoint
+Endpoint for Petabox S3 Object Storage.
+Specify the endpoint from the same region.
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: Petabox
+- Type: string
+- Required: true
+- Examples:
+
+- "s3.petabox.io"
+
+- "s3.us-east-1.petabox.io"
+
+- "s3.eu-central-1.petabox.io"
+
+- "s3.ap-southeast-1.petabox.io"
+
+- Asia Pacific (Singapore)
+
+- "s3.me-south-1.petabox.io"
+
+- "s3.sa-east-1.petabox.io"
+
+- South America (São Paulo)
+
+
+
+--s3-endpoint
Endpoint for Liara Object Storage API.
Properties:
@@ -12011,7 +12281,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for OSS API.
Properties:
@@ -12124,7 +12394,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for OBS API.
Properties:
@@ -12197,7 +12467,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for Scaleway Object Storage.
Properties:
@@ -12222,7 +12492,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for StackPath Object Storage.
Properties:
@@ -12247,7 +12517,24 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
+Endpoint for Google Cloud Storage.
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: GCS
+- Type: string
+- Required: false
+- Examples:
+
+- "https://storage.googleapis.com"
+
+- Google Cloud Storage endpoint
+
+
+
+--s3-endpoint
Endpoint for Storj Gateway.
Properties:
@@ -12264,7 +12551,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for Tencent COS API.
Properties:
@@ -12353,7 +12640,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for RackCorp Object Storage.
Properties:
@@ -12442,7 +12729,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for Qiniu Object Storage.
Properties:
@@ -12483,14 +12770,14 @@ $ rclone -q --s3-versions ls s3:cleanup-test
---s3-endpoint
+--s3-endpoint
Endpoint for S3 API.
Required when using an S3 clone.
Properties:
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
-- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
+- Provider: !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
- Type: string
- Required: false
- Examples:
@@ -12595,9 +12882,13 @@ $ rclone -q --s3-versions ls s3:cleanup-test
-- "s3.ir-thr-at1.arvanstorage.com"
+
- "s3.ir-thr-at1.arvanstorage.ir"
-- ArvanCloud Tehran Iran (Asiatech) endpoint
+- ArvanCloud Tehran Iran (Simin) endpoint
+
+- "s3.ir-tbz-sh1.arvanstorage.ir"
+
+- ArvanCloud Tabriz Iran (Shahriar) endpoint
@@ -12863,7 +13154,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- "ir-thr-at1"
-- Tehran Iran (Asiatech)
+- Tehran Iran (Simin)
- "ir-tbz-sh1"
@@ -13151,7 +13442,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- Config: location_constraint
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
-- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
+- Provider: !AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox
- Type: string
- Required: false
@@ -13469,11 +13760,19 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- The Standard class for any upload.
- Suitable for on-demand content like streaming or CDN.
+- Available in all regions.
- "GLACIER"
- Archived storage.
- Prices are lower, but it needs to be restored first to be accessed.
+- Available in FR-PAR and NL-AMS regions.
+
+- "ONEZONE_IA"
+
+- One Zone - Infrequent Access.
+- A good choice for storing secondary backup copies or easily re-creatable data.
+- Available in the FR-PAR region only.
@@ -13507,7 +13806,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
Advanced options
-Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
--s3-bucket-acl
Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
@@ -13901,6 +14200,16 @@ Windows: "%USERPROFILE%\.aws\credentials"
Type: string
Required: false
+--s3-directory-markers
+Upload an empty object with a trailing slash when a new directory is created
+Empty folders are unsupported for bucket-based remotes; this option creates an empty object ending with "/" to persist the folder.
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_S3_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
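+As an illustrative sketch, creating a directory with markers enabled:
+# remote:bucket/new/dir is hypothetical
+rclone mkdir remote:bucket/new/dir --s3-directory-markers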
--s3-use-multipart-etag
Whether to use ETag in multipart uploads for verification
This should be true, false or left unset to use the default for the provider.
@@ -13969,6 +14278,20 @@ Windows: "%USERPROFILE%\.aws\credentials"
Type: Tristate
Default: unset
+--s3-use-accept-encoding-gzip
+Whether to send Accept-Encoding: gzip
header.
+By default, rclone will append Accept-Encoding: gzip
to the request to download compressed objects whenever possible.
+However some providers such as Google Cloud Storage may alter the HTTP headers, breaking the signature of the request.
+A symptom of this would be receiving errors like
+SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
+In this case, you might want to try disabling this option.
+Properties:
+
+- Config: use_accept_encoding_gzip
+- Env Var: RCLONE_S3_USE_ACCEPT_ENCODING_GZIP
+- Type: Tristate
+- Default: unset
+
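+If you suspect this problem, one quick experiment is to retry a listing with the header disabled:
+# remote name is illustrative
+rclone lsd remote: --s3-use-accept-encoding-gzip=false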
Suppress setting and reading of system metadata
Properties:
@@ -14316,6 +14639,15 @@ location_constraint =
acl = private
server_side_encryption =
storage_class =
+Google Cloud Storage
+Google Cloud Storage is an S3-interoperable object storage service from Google Cloud Platform.
+To connect to Google Cloud Storage you will need an access key and secret key. These can be retrieved by creating an HMAC key.
+[gs]
+type = s3
+provider = GCS
+access_key_id = your_access_key
+secret_access_key = your_secret_key
+endpoint = https://storage.googleapis.com
DigitalOcean Spaces
Spaces is an S3-interoperable object storage service from cloud provider DigitalOcean.
To connect to DigitalOcean Spaces you will need an access key and secret key. These can be retrieved on the "Applications & API" page of the DigitalOcean control panel. They will be needed when prompted by rclone config
for your access_key_id
and secret_access_key
.
@@ -15984,6 +16316,152 @@ Name Type
cos s3
Netease NOS
For Netease NOS configure as per the configurator rclone config
setting the provider Netease
. This will automatically set force_path_style = false
which is necessary for it to run properly.
+Petabox
+Here is an example of making a Petabox configuration. First run:
+rclone config
+
+This will guide you through an interactive setup process.
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+n/s> n
+
+Enter name for new remote.
+name> My Petabox Storage
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+[snip]
+Storage> s3
+
+Option provider.
+Choose your S3 provider.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+[snip]
+XX / Petabox Object Storage
+ \ (Petabox)
+[snip]
+provider> Petabox
+
+Option env_auth.
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+Only applies if access_key_id and secret_access_key is blank.
+Choose a number from below, or type in your own boolean value (true or false).
+Press Enter for the default (false).
+ 1 / Enter AWS credentials in the next step.
+ \ (false)
+ 2 / Get AWS credentials from the environment (env vars or IAM).
+ \ (true)
+env_auth> 1
+
+Option access_key_id.
+AWS Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+access_key_id> YOUR_ACCESS_KEY_ID
+
+Option secret_access_key.
+AWS Secret Access Key (password).
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+secret_access_key> YOUR_SECRET_ACCESS_KEY
+
+Option region.
+Region where your bucket will be created and your data stored.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / US East (N. Virginia)
+ \ (us-east-1)
+ 2 / Europe (Frankfurt)
+ \ (eu-central-1)
+ 3 / Asia Pacific (Singapore)
+ \ (ap-southeast-1)
+ 4 / Middle East (Bahrain)
+ \ (me-south-1)
+ 5 / South America (São Paulo)
+ \ (sa-east-1)
+region> 1
+
+Option endpoint.
+Endpoint for Petabox S3 Object Storage.
+Specify the endpoint from the same region.
+Choose a number from below, or type in your own value.
+ 1 / US East (N. Virginia)
+ \ (s3.petabox.io)
+ 2 / US East (N. Virginia)
+ \ (s3.us-east-1.petabox.io)
+ 3 / Europe (Frankfurt)
+ \ (s3.eu-central-1.petabox.io)
+ 4 / Asia Pacific (Singapore)
+ \ (s3.ap-southeast-1.petabox.io)
+ 5 / Middle East (Bahrain)
+ \ (s3.me-south-1.petabox.io)
+ 6 / South America (São Paulo)
+ \ (s3.sa-east-1.petabox.io)
+endpoint> 1
+
+Option acl.
+Canned ACL used when creating buckets and storing or copying objects.
+This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
+For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+Note that this ACL is applied when server-side copying objects as S3
+doesn't copy the ACL from the source but rather writes a fresh one.
+If the acl is an empty string then no X-Amz-Acl: header is added and
+the default (private) will be used.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ / Owner gets FULL_CONTROL.
+ 1 | No one else has access rights (default).
+ \ (private)
+ / Owner gets FULL_CONTROL.
+ 2 | The AllUsers group gets READ access.
+ \ (public-read)
+ / Owner gets FULL_CONTROL.
+ 3 | The AllUsers group gets READ and WRITE access.
+ | Granting this on a bucket is generally not recommended.
+ \ (public-read-write)
+ / Owner gets FULL_CONTROL.
+ 4 | The AuthenticatedUsers group gets READ access.
+ \ (authenticated-read)
+ / Object owner gets FULL_CONTROL.
+ 5 | Bucket owner gets READ access.
+ | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-read)
+ / Both the object owner and the bucket owner get FULL_CONTROL over the object.
+ 6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-full-control)
+acl> 1
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> No
+
+Configuration complete.
+Options:
+- type: s3
+- provider: Petabox
+- access_key_id: YOUR_ACCESS_KEY_ID
+- secret_access_key: YOUR_SECRET_ACCESS_KEY
+- region: us-east-1
+- endpoint: s3.petabox.io
+Keep this "My Petabox Storage" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+This will leave the config file looking like this.
+[My Petabox Storage]
+type = s3
+provider = Petabox
+access_key_id = YOUR_ACCESS_KEY_ID
+secret_access_key = YOUR_SECRET_ACCESS_KEY
+region = us-east-1
+endpoint = s3.petabox.io
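+Once configured the remote works like any other; note the quoting required because this remote name contains spaces:
+# bucket name is illustrative
+rclone mkdir "My Petabox Storage:my-bucket"
+rclone copy /path/to/files "My Petabox Storage:my-bucket"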
Storj
Storj is a decentralized cloud storage which can be used through its native protocol or an S3 compatible gateway.
The S3 compatible gateway is configured using rclone config
with a type of s3
and with a provider name of Storj
. Here is an example run of the configurator.
@@ -17265,7 +17743,7 @@ y/e/d> y
Chunker supports hashsums only when a compatible metadata is present. Hence, if you choose metadata format of none
, chunker will report hashsum as UNSUPPORTED
.
Please note that by default metadata is stored only for composite files. If a file is smaller than the configured chunk size, chunker will transparently redirect hash requests to the wrapped remote, so support depends on that. You will see the empty string as a hashsum of the requested type for small files if the wrapped remote doesn't support it.
Many storage backends support MD5 and SHA1 hash types, and so does chunker. With chunker you can choose one or the other but not both. MD5 is set by default as the most widely supported type. Since chunker keeps hashes for composite files and falls back to the wrapped remote's hash for non-chunked ones, we advise you to choose the same hash type as supported by the wrapped remote so that your file listings look coherent.
-If your storage backend does not support MD5 or SHA1 but you need consistent file hashing, configure chunker with md5all
or sha1all
. These two modes guarantee given hash for all files. If wrapped remote doesn't support it, chunker will then add metadata to all files, even small. However, this can double the amount of small files in storage and incur additional service charges. You can even use chunker to force md5/sha1 support in any other remote at expense of sidecar meta objects by setting e.g. chunk_type=sha1all
to force hashsums and chunk_size=1P
to effectively disable chunking.
+If your storage backend does not support MD5 or SHA1 but you need consistent file hashing, configure chunker with md5all
or sha1all
. These two modes guarantee given hash for all files. If wrapped remote doesn't support it, chunker will then add metadata to all files, even small. However, this can double the amount of small files in storage and incur additional service charges. You can even use chunker to force md5/sha1 support in any other remote at expense of sidecar meta objects by setting e.g. hash_type=sha1all
to force hashsums and chunk_size=1P
to effectively disable chunking.
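A minimal config sketch of this trick, assuming an existing wrapped remote (the section and remote names are illustrative):
# mybase:bucket is a hypothetical wrapped remote
[forcedsha1]
type = chunker
remote = mybase:bucket
hash_type = sha1all
chunk_size = 1P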
Normally, when a file is copied to chunker controlled remote, chunker will ask the file source for compatible file hash and revert to on-the-fly calculation if none is found. This involves some CPU overhead but provides a guarantee that given hashsum is available. Also, chunker will reject a server-side copy or move operation if source and destination hashsum types are different resulting in the extra network bandwidth, too. In some rare cases this may be undesired, so chunker provides two optional choices: sha1quick
and md5quick
. If the source does not support primary hash type and the quick mode is enabled, chunker will try to fall back to the secondary type. This will save CPU and bandwidth but can result in empty hashsums at destination. Beware of consequences: the sync
command will revert (sometimes silently) to time/size comparison if compatible hashsums between source and target are not found.
Modified time
Chunker stores modification times using the wrapped remote so support depends on that. For a small non-chunked file the chunker overlay simply manipulates modification time of the wrapped remote file. For a composite file with metadata chunker will get and set modification time of the metadata object on the wrapped remote. If file is chunked but metadata format is none
then chunker will use modification time of the first data chunk.
@@ -17870,7 +18348,7 @@ $ rclone -q ls secret:
identical file names will have identical uploaded names
Cloud storage systems have limits on file name length and total path length which rclone is more likely to breach using "Standard" file name encryption. Where file names are less than 156 characters in length, issues should not be encountered, irrespective of cloud storage provider.
-An experimental advanced option filename_encoding
is now provided to address this problem to a certain degree. For cloud storage systems with case sensitive file names (e.g. Google Drive), base64
can be used to reduce file name length. For cloud storage systems using UTF-16 to store file names internally (e.g. OneDrive), base32768
can be used to drastically reduce file name length.
+An experimental advanced option filename_encoding
is now provided to address this problem to a certain degree. For cloud storage systems with case sensitive file names (e.g. Google Drive), base64
can be used to reduce file name length. For cloud storage systems using UTF-16 to store file names internally (e.g. OneDrive, Dropbox), base32768
can be used to drastically reduce file name length.
An alternative, future rclone file name encryption mode may tolerate backend provider path length limits.
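A possible config sketch for a UTF-16 backend such as OneDrive (the remote names are illustrative and the password stands in for the obscured value rclone config writes):
# onedrive:encrypted is hypothetical; password shown as a placeholder
[longnames]
type = crypt
remote = onedrive:encrypted
filename_encoding = base32768
password = <obscured password>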
Directory name encryption
Crypt offers the option of encrypting dir names or leaving them intact. There are two options:
@@ -17881,7 +18359,7 @@ $ rclone -q ls secret:
Modified time and hashes
Crypt stores modification times using the underlying remote so support depends on that.
Hashes are not stored for crypt. However the data integrity is protected by an extremely strong crypto authenticator.
-Use the rclone cryptcheck
command to check the integrity of a crypted remote instead of rclone check
which can't check the checksums properly.
+Use the rclone cryptcheck
command to check the integrity of an encrypted remote instead of rclone check
which can't check the checksums properly.
Standard options
Here are the Standard options specific to crypt (Encrypt/Decrypt a remote).
--crypt-remote
@@ -17916,7 +18394,7 @@ $ rclone -q ls secret:
"off"
- Don't encrypt the file names.
-- Adds a ".bin" extension only.
+- Adds a ".bin" extension only, or the suffix configured via --crypt-suffix.
@@ -17965,6 +18443,7 @@ $ rclone -q ls secret:
Advanced options
Here are the Advanced options specific to crypt (Encrypt/Decrypt a remote).
--crypt-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type without re-uploading all the data. Just make two crypt backends pointing to two different directories with the single changed parameter and use rclone move to move the files between the crypt remotes.
@@ -18006,6 +18485,16 @@ $ rclone -q ls secret:
+--crypt-pass-bad-blocks
+If set this will pass bad blocks through as all 0.
+This should not be set in normal operation; set it only when trying to recover an encrypted file with errors and it is desired to recover as much of the file as possible.
+Properties:
+
+- Config: pass_bad_blocks
+- Env Var: RCLONE_CRYPT_PASS_BAD_BLOCKS
+- Type: bool
+- Default: false
+
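+A recovery attempt might then look like this:
+# remote and file names are illustrative
+rclone copy --crypt-pass-bad-blocks secret:damaged-file /tmp/recovered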
--crypt-filename-encoding
How to encode the encrypted filename to text string.
This option can help shorten the encrypted filename. The most suitable choice depends on how your remote counts filename length and whether it is case sensitive.
@@ -18028,10 +18517,20 @@ $ rclone -q ls secret:
"base32768"
- Encode using base32768. Suitable if your remote counts UTF-16 or
-- Unicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)
+- Unicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)
+--crypt-suffix
+If this is set it will override the default suffix of ".bin".
+Setting suffix to "none" will result in an empty suffix. This may be useful when the path length is critical.
+Properties:
+
+- Config: suffix
+- Env Var: RCLONE_CRYPT_SUFFIX
+- Type: string
+- Default: ".bin"
+
Any metadata supported by the underlying remote is read and written.
See the metadata docs for more info.
@@ -18056,8 +18555,8 @@ rclone rc backend/command command=encode fs=crypt: file1 [file2...]
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-Backing up a crypted remote
-If you wish to backup a crypted remote, it is recommended that you use rclone sync
on the encrypted files, and make sure the passwords are the same in the new encrypted remote.
+Backing up an encrypted remote
+If you wish to backup an encrypted remote, it is recommended that you use rclone sync
on the encrypted files, and make sure the passwords are the same in the new encrypted remote.
This will have the following advantages
rclone sync
will check the checksums while copying
@@ -18598,8 +19097,8 @@ y/e/d> y
If an upload batch is idle for more than this long then it will be uploaded.
The default for this is 0 which means rclone will choose a sensible default based on the batch_mode in use.
-- batch_mode: async - default batch_timeout is 500ms
-- batch_mode: sync - default batch_timeout is 10s
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
Properties:
@@ -18618,6 +19117,15 @@ y/e/d> y
- Type: Duration
- Default: 10m0s
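As an illustrative sketch, batching many small uploads with an explicit timeout:
# paths and remote name are hypothetical
rclone copy /path/with/small/files dropbox: --dropbox-batch-mode async --dropbox-batch-timeout 1s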
+--dropbox-pacer-min-sleep
+Minimum time to sleep between API calls.
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_DROPBOX_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
--dropbox-encoding
The encoding for the backend.
See the encoding section in the overview for more info.
@@ -19181,7 +19689,7 @@ rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47
File modification time (timestamps) is supported to 1 second resolution for major FTP servers: ProFTPd, PureFTPd, VsFTPd, and FileZilla FTP server. The VsFTPd
server has a non-standard implementation of time-related protocol commands and needs a special configuration setting: writing_mdtm = true
.
Support for precise file time with other FTP servers varies depending on what protocol extensions they advertise. If all the MLSD
, MDTM
and MFMT
extensions are present, rclone will use them together to provide precise time. Otherwise the times you see on the FTP server through rclone are those of the last file upload.
You can use the following command to check whether rclone can use precise time with your FTP server: rclone backend features your_ftp_remote:
(the trailing colon is important). Look for the number in the line tagged by Precision
designating the remote time precision expressed as nanoseconds. A value of 1000000000
means that file time precision of 1 second is available. A value of 3153600000000000000
(or another large number) means "unsupported".
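Since you only need the one tagged line, a quick way to check might be:
# remote name as in the text above
rclone backend features your_ftp_remote: | grep Precision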
-Google Cloud Storage
+Google Cloud Storage
Paths are specified as remote:bucket
(or remote:
for the lsd
command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir
.
Configuration
The initial setup for Google Cloud Storage involves getting a token from Google Cloud Storage which you need to do in your browser. rclone config
walks you through it.
@@ -19412,6 +19920,16 @@ y/e/d> y
Type: string
Required: false
+--gcs-user-project
+User project.
+Optional - needed only for requester pays.
+Properties:
+
+- Config: user_project
+- Env Var: RCLONE_GCS_USER_PROJECT
+- Type: string
+- Required: false
+
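+For example, listing a requester pays bucket might look like this:
+# bucket and project names are illustrative
+rclone ls gcs:requester-pays-bucket --gcs-user-project my-billing-project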
--gcs-service-account-file
Service Account Credentials JSON file path.
Leave blank normally. Needed only if you want use SA instead of interactive login.
@@ -19786,6 +20304,16 @@ y/e/d> y
Type: string
Required: false
+--gcs-directory-markers
+Upload an empty object with a trailing slash when a new directory is created
+Empty folders are unsupported for bucket-based remotes; this option creates an empty object ending with "/" to persist the folder.
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_GCS_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
--gcs-no-check-bucket
If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions rclone does if you know the bucket exists already.
@@ -19951,9 +20479,8 @@ y/e/d> y
To create a service account and obtain its credentials, go to the Google Developer Console.
You must have a project - create one if you don't.
Then go to "IAM & admin" -> "Service Accounts".
-Use the "Create Credentials" button. Fill in "Service account name" with something that identifies your client. "Role" can be empty.
-Tick "Furnish a new private key" - select "Key type JSON".
-Tick "Enable G Suite Domain-wide Delegation". This option makes "impersonation" possible, as documented here: Delegating domain-wide authority to the service account
+Use the "Create Service Account" button. Fill in "Service account name" and "Service account ID" with something that identifies your client.
+Select "Create And Continue". Step 2 and 3 are optional.
These credentials are what rclone will use for authentication. If you ever need to remove access, press the "Delete service account key" button.
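+Once you have downloaded the JSON key, a quick way to test it might be:
+# remote name and key path are illustrative
+rclone lsd gdrive: --drive-service-account-file /path/to/key.json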
@@ -20057,7 +20584,7 @@ trashed=false and 'c' in parents
Shortcuts
In March 2020 Google introduced a new feature in Google Drive called drive shortcuts (API). These will (by September 2020) replace the ability for files or folders to be in multiple folders at once.
Shortcuts are files that link to other files on Google Drive somewhat like a symlink in unix, except they point to the underlying file data (e.g. the inode in unix terms) so they don't break if the source is renamed or moved about.
-Be default rclone treats these as follows.
+By default rclone treats these as follows.
For shortcuts pointing to files:
- When listing a file shortcut appears as the destination file.
@@ -20708,6 +21235,7 @@ trashed=false and 'c' in parents
- Default: 100
--drive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two different Google drives. Note that this isn't enabled by default because it isn't easy to tell if it will work between any two configurations.
Properties:
@@ -20796,6 +21324,27 @@ trashed=false and 'c' in parents
Type: MultiEncoder
Default: InvalidUtf8
+--drive-env-auth
+Get IAM credentials from runtime (environment variables or instance meta data if no env vars).
+Only applies if service_account_file and service_account_credentials are blank.
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_DRIVE_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+
+- "false"
+
+- Enter credentials in the next step.
+
+- "true"
+
+- Get GCP IAM credentials from the environment (env vars or IAM).
+
+
+
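+On a GCE instance with an attached service account, for instance, this might be as simple as:
+# remote name is illustrative
+rclone lsd gdrive: --drive-env-auth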
Backend commands
Here are the commands specific to the drive backend.
Run them with
@@ -21406,7 +21955,7 @@ rclone backend drop Hasher:
Note that setting max_age = 0
will disable checksum caching completely.
If you set max_age = off
, checksums in cache will never age, unless you fully rewrite or delete the file.
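A minimal hasher config sketch using these options (the section and base remote names are illustrative):
# mybase: is a hypothetical base remote
[hashcache]
type = hasher
remote = mybase:
hashes = md5,sha1
max_age = off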
Cache storage
-Cached checksums are stored as bolt
database files under rclone cache directory, usually ~/.cache/rclone/kv/
. Databases are maintained one per base backend, named like BaseRemote~hasher.bolt
. Checksums for multiple alias
-es into a single base backend will be stored in the single database. All local paths are treated as aliases into the local
backend (unless crypted or chunked) and stored in ~/.cache/rclone/kv/local~hasher.bolt
. Databases can be shared between multiple rclone processes.
+Cached checksums are stored as bolt
database files under rclone cache directory, usually ~/.cache/rclone/kv/
. Databases are maintained one per base backend, named like BaseRemote~hasher.bolt
. Checksums for multiple alias
-es into a single base backend will be stored in the single database. All local paths are treated as aliases into the local
backend (unless encrypted or chunked) and stored in ~/.cache/rclone/kv/local~hasher.bolt
. Databases can be shared between multiple rclone processes.
HDFS
HDFS is a distributed file-system, part of the Apache Hadoop framework.
Paths are specified as remote:
or remote:path/to/dir
.
@@ -21560,7 +22109,7 @@ username = root
--hdfs-data-transfer-protection
Kerberos data transfer protection: authentication|integrity|privacy.
-Specifies whether or not authentication, data signature integrity checks, and wire encryption is required when communicating the the datanodes. Possible values are 'authentication', 'integrity' and 'privacy'. Used only with KERBEROS enabled.
+Specifies whether or not authentication, data signature integrity checks, and wire encryption are required when communicating with the datanodes. Possible values are 'authentication', 'integrity' and 'privacy'. Used only with KERBEROS enabled.
Properties:
- Config: data_transfer_protection
@@ -22431,7 +22980,7 @@ y/e/d> y
Modified time and hashes
Jottacloud allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not.
Jottacloud supports MD5 type hashes, so you can use the --checksum
flag.
-Note that Jottacloud requires the MD5 hash before upload so if the source does not have an MD5 checksum then the file will be cached temporarily on disk (in location given by --temp-dir) before it is uploaded. Small files will be cached in memory - see the --jottacloud-md5-memory-limit flag. When uploading from local disk the source checksum is always available, so this does not apply. Starting with rclone version 1.52 the same is true for crypted remotes (in older versions the crypt backend would not calculate hashes for uploads from local disk, so the Jottacloud backend had to do it as described above).
+Note that Jottacloud requires the MD5 hash before upload so if the source does not have an MD5 checksum then the file will be cached temporarily on disk (in location given by --temp-dir) before it is uploaded. Small files will be cached in memory - see the --jottacloud-md5-memory-limit flag. When uploading from local disk the source checksum is always available, so this does not apply. Starting with rclone version 1.52 the same is true for encrypted remotes (in older versions the crypt backend would not calculate hashes for uploads from local disk, so the Jottacloud backend had to do it as described above).
Restricted filename characters
In addition to the default restricted characters set the following characters are also replaced:
@@ -23363,7 +23912,7 @@ me@example.com:/$
--mega-use-https
Use HTTPS for transfers.
-MEGA uses plain text HTTP connections by default. Some ISPs throttle HTTP connections, this causes transfers to become very slow. Enabling this will force MEGA to use HTTPS for all transfers. HTTPS is normally not necesary since all data is already encrypted anyway. Enabling it will increase CPU usage and add network overhead.
+MEGA uses plain text HTTP connections by default. Some ISPs throttle HTTP connections, which causes transfers to become very slow. Enabling this will force MEGA to use HTTPS for all transfers. HTTPS is normally not necessary since all data is already encrypted anyway. Enabling it will increase CPU usage and add network overhead.
Properties:
- Config: use_https
@@ -23734,6 +24283,13 @@ y/e/d> y
AZURE_USERNAME
: a username (usually an email address)
AZURE_PASSWORD
: the user's password
+- Workload Identity
+
+AZURE_TENANT_ID
: Tenant to authenticate in.
+AZURE_CLIENT_ID
: Client ID of the application the user will authenticate to.
+AZURE_FEDERATED_TOKEN_FILE
: Path to projected service account token file.
+AZURE_AUTHORITY_HOST
: Authority of an Azure Active Directory endpoint (default: login.microsoftonline.com).
+
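+For example, a hypothetical setup might export (all values are placeholders):
+export AZURE_TENANT_ID=00000000-0000-0000-0000-000000000000
+export AZURE_CLIENT_ID=11111111-1111-1111-1111-111111111111
+export AZURE_FEDERATED_TOKEN_FILE=/var/run/secrets/azure/tokens/azure-identity-token
+rclone lsd :azureblob,env_auth,account=ACCOUNT:CONTAINER
+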
Env Auth: 2. Managed Service Identity Credentials
When using Managed Service Identity, if the VM(SS) on which this program is running has a system-assigned identity, it will be used by default. If the resource has no system-assigned but exactly one user-assigned identity, the user-assigned identity will be used by default.
@@ -23745,7 +24301,7 @@ y/e/d> y
Then you could access rclone resources like this:
rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER
Or
-rclone lsf --azureblob-env-auth --azureblob-acccount=ACCOUNT :azureblob:CONTAINER
+rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
Which is analogous to using the az
tool:
az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login
Account and Shared Key
@@ -24130,6 +24686,17 @@ container/
+--azureblob-directory-markers
+Upload an empty object with a trailing slash when a new directory is created.
+Empty folders are unsupported for bucket-based remotes; this option creates an empty object ending with "/", to persist the folder.
+This object also has the metadata "hdi_isfolder = true" to conform to the Microsoft standard.
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_AZUREBLOB_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
--azureblob-no-check-container
If set, don't attempt to check the container exists or create it.
This can be useful when trying to minimise the number of transactions rclone does if you know the container exists already.
@@ -24554,6 +25121,7 @@ y/e/d> y
- Default: false
--onedrive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive Personal drives AND the files to copy are already shared between them. In other cases, rclone will fall back to normal copy (which will be slightly slower).
Properties:
@@ -24644,7 +25212,7 @@ y/e/d> y
--onedrive-hash-type
Specify the hash in use for the backend.
-This specifies the hash type in use. If set to "auto" it will use the default hash which is is QuickXorHash.
+This specifies the hash type in use. If set to "auto" it will use the default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive Personal. For 1.62 and later the default is to use a QuickXorHash for all onedrive types. If an SHA1 hash is desired then set this option accordingly.
From July 2023 QuickXorHash will be the only available hash for both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
@@ -24683,6 +25251,19 @@ y/e/d> y
+--onedrive-av-override
+Allows download of files the server thinks have a virus.
+The onedrive/sharepoint server may check files uploaded with an anti-virus checker. If it detects any potential viruses or malware it will block download of the file.
+In this case you will see a message like this
+server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+If you are 100% sure you want to download this file anyway then use the --onedrive-av-override flag, or av_override = true in the config file.
+Properties:
+
+- Config: av_override
+- Env Var: RCLONE_ONEDRIVE_AV_OVERRIDE
+- Type: bool
+- Default: false
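+
+For example (the path is illustrative), to download a file flagged as infected anyway:
+rclone copy --onedrive-av-override onedrive:path/to/flagged-file.zip /tmp/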
+
--onedrive-encoding
The encoding for the backend.
See the encoding section in the overview for more info.
@@ -25449,7 +26030,7 @@ provider = no_auth
--oos-sse-kms-key-id
-if using using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+If using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
Properties:
Paths are specified as remote:container
(or remote:
for the lsd
command.) You may put subdirectories in too, e.g. remote:container/path/to/dir
.
@@ -25906,7 +26488,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
-XX / OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
\ "swift"
[snip]
Storage> swift
@@ -25935,6 +26517,8 @@ Choose a number from below, or type in your own value
\ "https://auth.storage.memset.com/v2.0"
6 / OVH
\ "https://auth.cloud.ovh.net/v3"
+ 7 / Blomp Cloud Storage
+ \ "https://authenticate.ain.net"
auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
user_id>
@@ -26057,7 +26641,7 @@ rclone lsd myremote:
Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
Standard options
-Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)).
+Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
--swift-env-auth
Get swift credentials from environment variables in standard OpenStack form.
Properties:
@@ -26131,6 +26715,10 @@ rclone lsd myremote:
+- "https://authenticate.ain.net"
+
--swift-user-id
@@ -26291,7 +26879,7 @@ rclone lsd myremote:
Advanced options
-Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)).
+Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
--swift-leave-parts-on-error
If true avoid calling abort upload on a failure.
It should be set to true for resuming uploads across different sessions.
@@ -26565,10 +27153,220 @@ y/e/d> y
Type: string
Required: false
+PikPak
+PikPak is a private cloud drive.
+Paths are specified as remote:path
, and may be as deep as required, e.g. remote:directory/subdirectory
.
+Configuration
+Here is an example of making a remote for PikPak.
+First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> remote
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+XX / PikPak
+ \ (pikpak)
+Storage> XX
+
+Option user.
+Pikpak username.
+Enter a value.
+user> USERNAME
+
+Option pass.
+Pikpak password.
+Choose an alternative below.
+y) Yes, type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n>
+
+Configuration complete.
+Options:
+- type: pikpak
+- user: USERNAME
+- pass: *** ENCRYPTED ***
+- token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
+Keep this "remote" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Standard options
+Here are the Standard options specific to pikpak (PikPak).
+--pikpak-user
+Pikpak username.
+Properties:
+
+- Config: user
+- Env Var: RCLONE_PIKPAK_USER
+- Type: string
+- Required: true
+
+--pikpak-pass
+Pikpak password.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_PIKPAK_PASS
+- Type: string
+- Required: true
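+
+For example, to generate an obscured password for the config file (the password shown is illustrative):
+rclone obscure 'YourPassword'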
+
+Advanced options
+Here are the Advanced options specific to pikpak (PikPak).
+--pikpak-client-id
+OAuth Client Id.
+Leave blank normally.
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PIKPAK_CLIENT_ID
+- Type: string
+- Required: false
+
+--pikpak-client-secret
+OAuth Client Secret.
+Leave blank normally.
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+- Type: string
+- Required: false
+
+--pikpak-token
+OAuth Access Token as a JSON blob.
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PIKPAK_TOKEN
+- Type: string
+- Required: false
+
+--pikpak-auth-url
+Auth server URL.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PIKPAK_AUTH_URL
+- Type: string
+- Required: false
+
+--pikpak-token-url
+Token server url.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PIKPAK_TOKEN_URL
+- Type: string
+- Required: false
+
+--pikpak-root-folder-id
+ID of the root folder. Leave blank normally.
+Fill in for rclone to use a non root folder as its starting point.
+Properties:
+
+- Config: root_folder_id
+- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+- Type: string
+- Required: false
+
+--pikpak-use-trash
+Send files to the trash instead of deleting permanently.
+Defaults to true, namely sending files to the trash. Use --pikpak-use-trash=false
to delete files permanently instead.
+Properties:
+
+- Config: use_trash
+- Env Var: RCLONE_PIKPAK_USE_TRASH
+- Type: bool
+- Default: true
+
+--pikpak-trashed-only
+Only show files that are in the trash.
+This will show trashed files in their original directory structure.
+Properties:
+
+- Config: trashed_only
+- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+- Type: bool
+- Default: false
+
+--pikpak-hash-memory-limit
+Files bigger than this will be cached on disk to calculate the hash if required.
+Properties:
+
+- Config: hash_memory_limit
+- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+- Type: SizeSuffix
+- Default: 10Mi
+
+--pikpak-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PIKPAK_ENCODING
+- Type: MultiEncoder
+- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+
+Backend commands
+Here are the commands specific to the pikpak backend.
+Run them with
+rclone backend COMMAND remote:
+The help below will explain what arguments each command takes.
+See the backend command for more info on how to pass options and arguments.
+These can be run on a running backend using the rc command backend/command.
+addurl
+Add an offline download task for a URL
+rclone backend addurl remote: [options] [<arguments>+]
+This command adds an offline download task for a URL.
+Usage:
+rclone backend addurl pikpak:dirpath url
+Downloads will be stored in 'dirpath'. If 'dirpath' is invalid, the download will fall back to the default 'My Pack' folder.
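+For example (the directory and URL are illustrative):
+rclone backend addurl pikpak:mydir https://example.com/archive.zip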
+decompress
+Request decompression of a file or files in a folder
+rclone backend decompress remote: [options] [<arguments>+]
+This command requests decompression of a file or files in a folder.
+Usage:
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+An optional argument 'filename' can be specified for a file located in 'pikpak:dirpath'. You may want to pass '-o password=password' for password-protected files. Also, pass '-o delete-src-file' to delete source files after decompression finishes.
+Result:
+{
+ "Decompressed": 17,
+ "SourceDeleted": 0,
+ "Errors": 0
+}
+Limitations
+Hashes
+PikPak supports the MD5 hash, but it is sometimes empty, especially for user-uploaded files.
+Deleted files
+Deleted files will still be visible with --pikpak-trashed-only
even after the trash is emptied. This goes away after a few days.
premiumize.me
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
The initial setup for premiumize.me involves getting a token from premiumize.me which you need to do in your browser. rclone config
walks you through it.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -26645,7 +27443,7 @@ y/e/d>
Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
-Standard options
+Standard options
Here are the Standard options specific to premiumizeme (premiumize.me).
--premiumizeme-api-key
API Key.
@@ -26657,7 +27455,7 @@ y/e/d>
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to premiumizeme (premiumize.me).
--premiumizeme-encoding
The encoding for the backend.
@@ -26669,14 +27467,14 @@ y/e/d>
Type: MultiEncoder
Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
-Limitations
+Limitations
Note that premiumize.me is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".
premiumize.me file names can't have the \
or "
characters in. rclone maps these to and from identical-looking unicode equivalents \
and "
premiumize.me only supports filenames up to 255 characters in length.
put.io
Paths are specified as remote:path
put.io paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
The initial setup for put.io involves getting a token from put.io which you need to do in your browser. rclone config
walks you through it.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -26760,7 +27558,7 @@ e/n/d/r/c/s/q> q
Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
-Advanced options
+Advanced options
Here are the Advanced options specific to putio (Put.io).
--putio-encoding
The encoding for the backend.
@@ -26772,12 +27570,12 @@ e/n/d/r/c/s/q> q
Type: MultiEncoder
Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
-Limitations
+Limitations
put.io has rate limiting. When you hit a limit, rclone automatically retries after waiting the amount of time requested by the server.
If you want to avoid ever hitting these limits, you may use the --tpslimit
flag with a low number. Note that the imposed limits may be different for different operations, and may change over time.
Seafile
This is a backend for the Seafile storage service:
- It works with both the free community edition and the professional edition.
- Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
- Encrypted libraries are also supported.
- It supports 2FA enabled users.
- Using a Library API Token is not supported.
-Configuration
+Configuration
There are two distinct modes in which you can set up your remote:
- You point your remote to the root of the server, meaning you don't specify a library during the configuration. Paths are specified as remote:library. You may put subdirectories in too, e.g. remote:library/path/to/dir.
- You point your remote to a specific library during the configuration. Paths are specified as remote:path/to/dir. This is the recommended mode when using encrypted libraries. (This mode is possibly slightly faster than the root mode.)
Configuration in root mode
Here is an example of making a seafile configuration for a user with no two-factor authentication. First run
@@ -26978,7 +27776,7 @@ http://my.seafile.server/d/9ea2455f6f55478bbb0d/
It has been actively developed using the seafile docker image of these versions:
- 6.3.4 community edition
- 7.0.5 community edition
- 7.1.3 community edition
- 9.0.10 community edition
Versions below 6.0 are not supported. Versions between 6.0 and 6.3 haven't been tested and might not work properly.
Each new version of rclone
is automatically tested against the latest docker image of the seafile community server.
-Standard options
+Standard options
Here are the Standard options specific to seafile (seafile).
--seafile-url
URL of seafile host to connect to.
@@ -27054,7 +27852,7 @@ http://my.seafile.server/d/9ea2455f6f55478bbb0d/
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to seafile (seafile).
--seafile-create-library
Should rclone create a library if it doesn't exist.
@@ -27086,7 +27884,7 @@ http://my.seafile.server/d/9ea2455f6f55478bbb0d/
Paths are specified as remote:path
. If the path does not begin with a /
it is relative to the home directory of the user. An empty path remote:
refers to the user's home directory. For example, rclone lsd remote:
would list the home directory of the user configured in the rclone remote config (i.e. /home/sftpuser
). However, rclone lsd remote:/
would list the root directory of the remote machine (i.e. /
)
Note that some SFTP servers will need the leading / - Synology is a good example of this. rsync.net and Hetzner, on the other hand, require users to OMIT the leading /.
Note that by default rclone will try to execute shell commands on the server, see shell access considerations.
-Configuration
+Configuration
Here is an example of making an SFTP configuration. First run
rclone config
This will guide you through an interactive setup process.
@@ -27229,7 +28027,7 @@ known_hosts_file = ~/.ssh/known_hosts
About command
The about
command returns the total space, free space, and used space on the remote for the disk of the specified path on the remote or, if not set, the disk of the root on the remote.
SFTP usually supports the about command, but it depends on the server. If the server implements the vendor-specific VFS statistics extension, which is normally the case with OpenSSH instances, it will be used. If not, but the same login has access to a Unix shell, where the df
command is available (e.g. in the remote's PATH), then this will be used instead. If the server shell is PowerShell, probably with a Windows OpenSSH server, rclone will use a built-in shell command (see shell access). If none of the above is applicable, about
will fail.
-Standard options
+Standard options
Here are the Standard options specific to sftp (SSH/SFTP).
--sftp-host
SSH host to connect to.
@@ -27363,7 +28161,7 @@ known_hosts_file = ~/.ssh/known_hosts
Type: bool
Default: false
-Advanced options
+Advanced options
Here are the Advanced options specific to sftp (SSH/SFTP).
--sftp-known-hosts-file
Optional path to known_hosts file.
@@ -27568,7 +28366,7 @@ known_hosts_file = ~/.ssh/known_hosts
to be passed to the sftp client and to any commands run (eg md5sum).
Pass multiple variables space separated, eg
VAR1=value VAR2=value
-and pass variables with spaces in in quotes, eg
+and pass variables with spaces in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
Properties:
@@ -27615,7 +28413,20 @@ known_hosts_file = ~/.ssh/known_hosts
- Type: SpaceSepList
- Default:
-Limitations
+--sftp-host-key-algorithms
+Space separated list of host key algorithms, ordered by preference.
+At least one must match the server configuration. This can be checked, for example, using ssh -Q HostKeyAlgorithms.
+Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
+Example:
+ssh-ed25519 ssh-rsa ssh-dss
+Properties:
+
+- Config: host_key_algorithms
+- Env Var: RCLONE_SFTP_HOST_KEY_ALGORITHMS
+- Type: SpaceSepList
+- Default:
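+
+For example (the remote name is illustrative):
+rclone lsd mysftp: --sftp-host-key-algorithms "ssh-ed25519 ssh-rsa"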
+
+Limitations
On some SFTP servers (e.g. Synology) the paths are different for SSH and SFTP so the hashes can't be calculated properly. For them using disable_hashcheck
is a good idea.
The only ssh agent supported under Windows is Putty's pageant.
The Go SSH library disables the use of the aes128-cbc cipher by default, due to security concerns. This can be re-enabled on a per-connection basis by setting the use_insecure_cipher
setting in the configuration file to true
. Further details on the insecurity of this cipher can be found in this paper.
@@ -27633,10 +28444,10 @@ known_hosts_file = ~/.ssh/known_hosts
This relies on go-smb2 library for communication with SMB protocol.
Paths are specified as remote:sharename
(or remote:
for the lsd
command.) You may put subdirectories in too, e.g. remote:item/path/to/dir
.
Notes
-The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in smb.conf
(usually in /etc/samba/
) file. You can find shares by quering the root if you're unsure (e.g. rclone lsd remote:
).
+The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in the smb.conf
file (usually in /etc/samba/
). You can find shares by querying the root if you're unsure (e.g. rclone lsd remote:
).
You can't access shared printers from rclone, obviously.
You can't use Anonymous access for logging in. You have to use the guest
user with an empty password instead. The rclone client tries to avoid 8.3 names when uploading files by encoding trailing spaces and periods. Alternatively, the local backend on Windows can access SMB servers using UNC paths, by \\server\share
. This doesn't apply to non-Windows OSes, such as Linux and macOS.
-Configuration
+Configuration
Here is an example of making a SMB configuration.
First run
rclone config
@@ -27711,7 +28522,7 @@ y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> d
-Standard options
+Standard options
Here are the Standard options specific to smb (SMB / CIFS).
--smb-host
SMB server hostname to connect to.
@@ -27772,7 +28583,7 @@ y/e/d> d
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to smb (SMB / CIFS).
--smb-idle-timeout
Max time before closing idle connections.
@@ -27873,7 +28684,7 @@ y/e/d> d
S3 backend: secret encryption key is shared with the gateway
-Configuration
+Configuration
To make a new Storj configuration you need one of the following: * Access Grant that someone else shared with you. * API Key of a Storj project you are a member of.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -27970,7 +28781,7 @@ y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
-Standard options
+Standard options
Here are the Standard options specific to storj (Storj Decentralized Cloud Storage).
--storj-provider
Choose an authentication method.
@@ -28103,7 +28914,7 @@ y/e/d> y
rclone sync --interactive --progress remote-us:bucket/path/to/dir/ remote-europe:bucket/path/to/dir/
Or even between another cloud storage and Storj.
rclone sync --interactive --progress s3:bucket/path/to/dir/ storj:bucket/path/to/dir/
-Limitations
+Limitations
rclone about
is not supported by the rclone Storj backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
See List of backends that do not support rclone about and rclone about
Known issues
@@ -28111,7 +28922,7 @@ y/e/d> y
To fix these, please raise your system limits. You can do this by issuing a ulimit -n 65536
just before you run rclone. To change the limits more permanently you can add this to your shell startup script, e.g. $HOME/.bashrc
, or change the system-wide configuration, usually /etc/sysctl.conf
and/or /etc/security/limits.conf
, but please refer to your operating system manual.
SugarSync
SugarSync is a cloud service that enables active synchronization of files across computers and other devices for file backup, access, syncing, and sharing.
-Configuration
+Configuration
The initial setup for SugarSync involves getting a token from SugarSync which you can do with rclone. rclone config
walks you through it.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -28184,7 +28995,7 @@ y/e/d> y
Deleting files
Deleted files will be moved to the "Deleted items" folder by default.
However you can supply the flag --sugarsync-hard-delete
or set the config parameter hard_delete = true
if you would like files to be deleted straight away.
-Standard options
+Standard options
Here are the Standard options specific to sugarsync (Sugarsync).
--sugarsync-app-id
Sugarsync App ID.
@@ -28225,7 +29036,7 @@ y/e/d> y
Type: bool
Default: false
-Advanced options
+Advanced options
Here are the Advanced options specific to sugarsync (Sugarsync).
--sugarsync-refresh-token
Sugarsync refresh token.
@@ -28297,7 +29108,7 @@ y/e/d> y
Type: MultiEncoder
Default: Slash,Ctl,InvalidUtf8,Dot
-Limitations
+Limitations
rclone about
is not supported by the SugarSync backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
See List of backends that do not support rclone about and rclone about
Tardigrade
@@ -28306,7 +29117,7 @@ y/e/d> y
This is a Backend for Uptobox file storage service. Uptobox is closer to a one-click hoster than a traditional cloud storage provider and therefore not suitable for long term storage.
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
To configure an Uptobox backend you'll need your personal api token. You'll find it in your account settings
Here is an example of how to make a remote called remote
with the default setup. First run:
rclone config
@@ -28361,7 +29172,7 @@ y/e/d>
To copy a local directory to an Uptobox directory called backup
rclone copy /home/source remote:backup
Modified time and hashes
-Uptobox supports neither modified times nor checksums.
+Uptobox supports neither modified times nor checksums. All timestamps will read as the time set by --default-time
.
Restricted filename characters
In addition to the default restricted characters set the following characters are also replaced:
@@ -28386,7 +29197,7 @@ y/e/d>
Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.
-Standard options
+Standard options
Here are the Standard options specific to uptobox (Uptobox).
--uptobox-access-token
Your access token.
@@ -28398,8 +29209,17 @@ y/e/d>
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to uptobox (Uptobox).
+--uptobox-private
+Set to make uploaded files private.
+Properties:
+
+- Config: private
+- Env Var: RCLONE_UPTOBOX_PRIVATE
+- Type: bool
+- Default: false
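+
+For example (paths are illustrative):
+rclone copy /home/source uptobox:backup --uptobox-private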
+
--uptobox-encoding
The encoding for the backend.
See the encoding section in the overview for more info.
@@ -28410,7 +29230,7 @@ y/e/d>
Type: MultiEncoder
Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot
-Limitations
+Limitations
Uptobox will delete inactive files that have not been accessed in 60 days.
rclone about
is not supported by this backend. An overview of used space can, however, be seen in the uptobox web interface.
Union
@@ -28420,7 +29240,7 @@ y/e/d>
Attributes :ro
and :nc
can be attached to the end of a path to tag the remote as read only or no create, e.g. remote:directory/subdirectory:ro
or remote:directory/subdirectory:nc
.
Subfolders can be used in upstream remotes. Assume a union remote named backup
with the remotes mydrive:private/backup
. Invoking rclone mkdir backup:desktop
is exactly the same as invoking rclone mkdir mydrive:private/backup/desktop
.
There will be no special handling of paths containing ..
segments. Invoking rclone mkdir backup:../desktop
is exactly the same as invoking rclone mkdir mydrive:private/backup/../desktop
.
-Configuration
+Configuration
Here is an example of how to make a union called remote
for local folders. First run:
rclone config
This will guide you through an interactive setup process:
@@ -28641,7 +29461,7 @@ e/n/d/r/c/s/q> q
-Standard options
+Standard options
Here are the Standard options specific to union (Union merges the contents of several upstream fs).
--union-upstreams
List of space separated upstreams.
@@ -28690,7 +29510,7 @@ e/n/d/r/c/s/q> q
Type: int
Default: 120
-Advanced options
+Advanced options
Here are the Advanced options specific to union (Union merges the contents of several upstream fs).
--union-min-free-space
Minimum viable free space for lfs/eplfs policies.
@@ -28708,7 +29528,7 @@ e/n/d/r/c/s/q> q
WebDAV
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
To configure the WebDAV remote you will need to have a URL for it, and a username and password. If you know what kind of system you are connecting to then rclone can enable extra features.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -28733,17 +29553,19 @@ Choose a number from below, or type in your own value
url> https://example.com/remote.php/webdav/
Name of the WebDAV site/service/software you are using
Choose a number from below, or type in your own value
- 1 / Nextcloud
- \ "nextcloud"
- 2 / Owncloud
- \ "owncloud"
- 3 / Sharepoint Online, authenticated by Microsoft account.
- \ "sharepoint"
- 4 / Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
- \ "sharepoint-ntlm"
- 5 / Other site/service or software
- \ "other"
-vendor> 1
+ 1 / Fastmail Files
+ \ (fastmail)
+ 2 / Nextcloud
+ \ (nextcloud)
+ 3 / Owncloud
+ \ (owncloud)
+ 4 / Sharepoint Online, authenticated by Microsoft account
+ \ (sharepoint)
+ 5 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
+ \ (sharepoint-ntlm)
+ 6 / Other site/service or software
+ \ (other)
+vendor> 2
User name
user> user
Password.
@@ -28779,9 +29601,9 @@ y/e/d> y
To copy a local directory to a WebDAV directory called backup
rclone copy /home/source remote:backup
Modified time and hashes
-Plain WebDAV does not support modified times. However when used with Owncloud or Nextcloud rclone will support modified times.
-Likewise plain WebDAV does not support hashes, however when used with Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes. Depending on the exact version of Owncloud or Nextcloud hashes may appear on all objects, or only on objects which had a hash uploaded with them.
-Standard options
+Plain WebDAV does not support modified times. However, when used with Fastmail Files, Owncloud or Nextcloud, rclone will support modified times.
+Likewise, plain WebDAV does not support hashes; however, when used with Fastmail Files, Owncloud or Nextcloud, rclone will support SHA1 and MD5 hashes. Depending on the exact version of Owncloud or Nextcloud hashes may appear on all objects, or only on objects which had a hash uploaded with them.
+Standard options
Here are the Standard options specific to webdav (WebDAV).
--webdav-url
URL of http host to connect to.
@@ -28803,6 +29625,10 @@ y/e/d> y
Required: false
Examples:
+--webdav-pacer-min-sleep
+Minimum time to sleep between API calls.
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_WEBDAV_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
+--webdav-nextcloud-chunk-size
+Nextcloud upload chunk size.
+We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performance. See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
+Set to 0 to disable chunked uploading.
+Properties:
+
+- Config: nextcloud_chunk_size
+- Env Var: RCLONE_WEBDAV_NEXTCLOUD_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
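+
+As a sketch (the remote name and URL are illustrative), a Nextcloud remote with a 1 GiB chunk size:
+[mynextcloud]
+type = webdav
+url = https://example.com/remote.php/webdav/
+vendor = nextcloud
+nextcloud_chunk_size = 1Gi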
+
Provider notes
See below for notes on specific providers.
+Fastmail Files
+Use https://webdav.fastmail.com/
or a subdirectory as the URL, and your Fastmail email username@domain.tld
as the username. Follow this documentation to create an app password with access to Files (WebDAV)
and use this as the password.
+Fastmail supports modified times using the X-OC-Mtime
header.
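+A minimal config sketch (remote name and username are illustrative; pass is the app password after rclone config obscures it):
+[fastmail]
+type = webdav
+url = https://webdav.fastmail.com/
+vendor = fastmail
+user = username@domain.tld
+pass = *** ENCRYPTED ***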
Owncloud
Click on the settings cog in the bottom right of the page and this will show the WebDAV URL that rclone needs in the config step. It will look something like https://example.com/remote.php/webdav/
.
Owncloud supports modified times using the X-OC-Mtime
header.
@@ -28961,7 +29810,7 @@ vendor = other
bearer_token_command = oidc-token XDC
Yandex Disk
Yandex Disk is a cloud storage solution created by Yandex.
-Configuration
+Configuration
Here is an example of making a yandex configuration. First run
rclone config
This will guide you through an interactive setup process:
@@ -29026,7 +29875,7 @@ y/e/d> y
Restricted filename characters
The default restricted characters set are replaced.
Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
-Standard options
+Standard options
Here are the Standard options specific to yandex (Yandex Disk).
--yandex-client-id
OAuth Client Id.
@@ -29048,7 +29897,7 @@ y/e/d> y
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to yandex (Yandex Disk).
--yandex-token
OAuth Access Token as a JSON blob.
@@ -29098,13 +29947,13 @@ y/e/d> y
Type: MultiEncoder
Default: Slash,Del,Ctl,InvalidUtf8,Dot
-Limitations
+Limitations
When uploading very large files (bigger than about 5 GiB) you will need to increase the --timeout
parameter. This is because Yandex pauses (perhaps to calculate the MD5SUM for the entire file) before returning confirmation that the file has been uploaded. The default handling of timeouts in rclone is to assume a 5 minute pause is an error and close the connection - you'll see net/http: timeout awaiting response headers
errors in the logs if this is happening. Setting the timeout to twice the max size of file in GiB should be enough, so if you want to upload a 30 GiB file set a timeout of 2 * 30 = 60m
, that is --timeout 60m
.
Having a Yandex Mail account is mandatory to use the Yandex.Disk subscription. Token generation will work without a mail account, but Rclone won't be able to complete any actions.
[403 - DiskUnsupportedUserAccountTypeError] User account type is not supported.
Zoho Workdrive
Zoho WorkDrive is a cloud storage solution created by Zoho.
-Configuration
+Configuration
Here is an example of making a zoho configuration. First run
rclone config
This will guide you through an interactive setup process:
@@ -29185,7 +30034,7 @@ y/e/d>
To view your current quota you can use the rclone about remote:
command which will display your current usage.
Restricted filename characters
Only control characters and invalid UTF-8 are replaced. In addition most Unicode full-width characters are not supported at all and will be removed from filenames during upload.
-Standard options
+Standard options
Here are the Standard options specific to zoho (Zoho).
--zoho-client-id
OAuth Client Id.
@@ -29244,7 +30093,7 @@ y/e/d>
-Advanced options
+Advanced options
Here are the Advanced options specific to zoho (Zoho).
--zoho-token
OAuth Access Token as a JSON blob.
@@ -29297,7 +30146,7 @@ y/e/d>
Local paths are specified as normal filesystem paths, e.g. /path/to/wherever
, so
rclone sync --interactive /home/source /tmp/destination
Will sync /home/source
to /tmp/destination
.
-Configuration
+Configuration
For consistency's sake one can also configure a remote of type local
in the config file, and access the local filesystem using rclone remote paths, e.g. remote:path/to/wherever
, but it is probably easier not to.
Modified time
Rclone reads and writes the modified time using an accuracy determined by the OS. Typically this is 1ns on Linux, 10ns on Windows and 1 second on OS X.
@@ -29668,7 +30517,7 @@ $ tree /tmp/b
0 file2
NB Rclone (like most unix tools such as du
, rsync
and tar
) treats a bind mount to the same device as being on the same filesystem.
NB This flag is only available on Unix based systems. On systems where it isn't supported (e.g. Windows) it will be ignored.
-Advanced options
+Advanced options
Here are the Advanced options specific to local (Local Disk).
--local-nounc
Disable UNC (long path names) conversion on Windows.
@@ -29905,7 +30754,7 @@ $ tree /tmp/b
See the metadata docs for more info.
-Backend commands
+Backend commands
Here are the commands specific to the local backend.
Run them with
rclone backend COMMAND remote:
@@ -29922,6 +30771,285 @@ $ tree /tmp/b
"error": return an error based on option value
Changelog
+v1.63.0 - 2023-06-30
+See commits
+
+- New backends
+
+- Pikpak (wiserain)
+- New S3 providers
+
+- New WebDAV providers
+
+
+- Major changes
+
+- Files will be copied to a temporary name ending in
.partial
when copying to local
, ftp
, sftp
then renamed at the end of the transfer. (Janne Hellsten, Nick Craig-Wood)
+
+- This helps with data integrity as we don't delete the existing file until the new one is complete.
+- It can be disabled with the --inplace flag.
+- This behaviour will also happen if the backend is wrapped, for example
sftp
wrapped with crypt
.
+
+- The s3, azureblob and gcs backends now support directory markers so empty directories are supported (Jānis Bebrītis, Nick Craig-Wood)
+- The --default-time flag now controls the unknown modification time of files/dirs (Nick Craig-Wood)
+
+- If a file or directory does not have a modification time rclone can read then rclone will display this fixed time instead.
+- For the old behaviour use
--default-time 0s
which will set this time to the time rclone started up.
+
+
+- New Features
+
+- build
+
+- Modernise linters in use and fixup all affected code (albertony)
+- Push docker beta to GHCR (GitHub container registry) (Richard Tweed)
+
+- cat: Add
--separator
option to cat command (Loren Gordon)
+- config
+
+- Do not remove/overwrite other files during config file save (albertony)
+- Do not overwrite config file symbolic link (albertony)
+- Stop
config create
making invalid config files (Nick Craig-Wood)
+
+- doc updates (Adam K, Aditya Basu, albertony, asdffdsazqqq, Damo, danielkrajnik, Dimitri Papadopoulos, dlitster, Drew Parsons, jumbi77, kapitainsky, mac-15, Mariusz Suchodolski, Nick Craig-Wood, NickIAm, Rintze Zelle, Stanislav Gromov, Tareq Sharafy, URenko, yuudi, Zach Kipp)
+- fs
+
+- Add
size
to JSON logs when moving or copying an object (Nick Craig-Wood)
+- Allow boolean features to be enabled with
--disable !Feature
(Nick Craig-Wood)
+
+- genautocomplete: Rename to
completion
with alias to the old name (Nick Craig-Wood)
+- librclone: Added example on using
librclone
with Go (alankrit)
+- lsjson: Make
--stat
more efficient (Nick Craig-Wood)
+- operations
+
+- Implement
--multi-thread-write-buffer-size
for speed improvements on downloads (Paulo Schreiner)
+- Reopen downloads on error when using
check --download
and cat
(Nick Craig-Wood)
+
+- rc:
config/listremotes
includes remotes defined with environment variables (kapitainsky)
+- selfupdate: Obey
--no-check-certificate
flag (Nick Craig-Wood)
+- serve restic: Trigger systemd notify (Shyim)
+- serve webdav: Implement owncloud checksum and modtime extensions (WeidiDeng)
+- sync:
--suffix-keep-extension
preserve 2 part extensions like .tar.gz (Nick Craig-Wood)
+
+- Bug Fixes
+
+- accounting
+
+- Fix Prometheus metrics to be the same as
core/stats
(Nick Craig-Wood)
+- Bwlimit signal handler should always start (Sam Lai)
+
+- bisync: Fix
maxDelete
parameter being ignored via the rc (Nick Craig-Wood)
+- cmd/ncdu: Fix screen corruption when logging (eNV25)
+- filter: Fix deadlock with errors on
--files-from
(douchen)
+- fs
+
+- Fix interaction between
--progress
and --interactive
(Nick Craig-Wood)
+- Fix infinite recursive call in pacer ModifyCalculator (fixes issue reported by the staticcheck linter) (albertony)
+
+- lib/atexit: Ensure OnError only calls cancel function once (Nick Craig-Wood)
+- lib/rest: Fix problems re-using HTTP connections (Nick Craig-Wood)
+- rc
+
+- Fix
operations/stat
with trailing /
(Nick Craig-Wood)
+- Fix missing
--rc
flags (Nick Craig-Wood)
+- Fix output of Time values in
options/get
(Nick Craig-Wood)
+
+- serve dlna: Fix potential data race (Nick Craig-Wood)
+- version: Fix reported os/kernel version for windows (albertony)
+
+- Mount
+
+- Add
--mount-case-insensitive
to force the mount to be case insensitive (Nick Craig-Wood)
+- Removed unnecessary byte slice allocation for reads (Anagh Kumar Baranwal)
+- Clarify rclone mount error when installed via homebrew (Nick Craig-Wood)
+- Added _netdev to the example mount so it gets treated as a remote-fs rather than local-fs (Anagh Kumar Baranwal)
+
+- Mount2
+
+- Updated go-fuse version (Anagh Kumar Baranwal)
+- Fixed statfs (Anagh Kumar Baranwal)
+- Disable xattrs (Anagh Kumar Baranwal)
+
+- VFS
+
+- Add MkdirAll function to make a directory and all beneath (Nick Craig-Wood)
+- Fix reload: failed to add virtual dir entry: file does not exist (Nick Craig-Wood)
+- Fix writing to a read only directory creating spurious directory entries (WeidiDeng)
+- Fix potential data race (Nick Craig-Wood)
+- Fix backends being Shutdown too early when startup takes a long time (Nick Craig-Wood)
+
+- Local
+
+- Fix filtering of symlinks with
-l
/--links
flag (Nick Craig-Wood)
+- Fix /path/to/file.rclonelink when
-l
/--links
is in use (Nick Craig-Wood)
+- Fix crash with
--metadata
on Android (Nick Craig-Wood)
+
+- Cache
+
+- Fix backends shutting down when in use when used via the rc (Nick Craig-Wood)
+
+- Crypt
+
+- Add
--crypt-suffix
option to set a custom suffix for encrypted files (jladbrook)
+- Add
--crypt-pass-bad-blocks
to allow corrupted file output (Nick Craig-Wood)
+- Fix reading 0 length files (Nick Craig-Wood)
+- Try not to return "unexpected EOF" error (Nick Craig-Wood)
+- Reduce allocations (albertony)
+- Recommend Dropbox for
base32768
encoding (Nick Craig-Wood)
+
+- Azure Blob
+
+- Empty directory markers (Nick Craig-Wood)
+- Support azure workload identities (Tareq Sharafy)
+- Fix azure blob uploads with multiple bits of metadata (Nick Craig-Wood)
+- Fix azurite compatibility by sending nil tier if set to empty string (Roel Arents)
+
+- Combine
+
+- Implement missing methods (Nick Craig-Wood)
+- Fix goroutine stack overflow on bad object (Nick Craig-Wood)
+
+- Drive
+
+- Add
--drive-env-auth
to get IAM credentials from runtime (Peter Brunner)
+- Update drive service account guide (Juang, Yi-Lin)
+- Fix change notify picking up files outside the root (Nick Craig-Wood)
+- Fix trailing slash mis-identification of folder as file (Nick Craig-Wood)
+- Fix incorrect remote after Update on object (Nick Craig-Wood)
+
+- Dropbox
+
+- Implement
--dropbox-pacer-min-sleep
flag (Nick Craig-Wood)
+- Fix the dropbox batcher stalling (Misty)
+
+- Fichier
+
+- Add
+--fichier-cdn
option to use the CDN for download (Nick Craig-Wood)
+
+- FTP
+
+- Lower log message priority when
SetModTime
is not supported to debug (Tobias Gion)
+- Fix "unsupported LIST line" errors on startup (Nick Craig-Wood)
+- Fix "501 Not a valid pathname." errors when creating directories (Nick Craig-Wood)
+
+- Google Cloud Storage
+
+- Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+- Added
--gcs-user-project
needed for requester pays (Christopher Merry)
+
+- HTTP
+
+- Add client certificate user auth middleware. This can auth
serve restic
from the username in the client cert. (Peter Fern)
+
+- Jottacloud
+
+- Fix vfs writeback stuck in a failed upload loop with file versioning disabled (albertony)
+
+- Onedrive
+
+- Add
--onedrive-av-override
flag to download files flagged as virus (Nick Craig-Wood)
+- Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
+- Report any list errors during
rclone cleanup
(albertony)
+
+- Putio
+
+- Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+- Fix modification times not being preserved for server side copy and move (Nick Craig-Wood)
+- Fix server side copy failures (400 errors) (Nick Craig-Wood)
+
+- S3
+
+- Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+- Update Scaleway storage classes (Brian Starkey)
+- Fix
--s3-versions
on individual objects (Nick Craig-Wood)
+- Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood)
+- Fix missing "tier" metadata (Nick Craig-Wood)
+- Fix V3sign: add missing subresource delete (cc)
+- Fix Arvancloud Domain and region changes and alphabetise the provider (Ehsan Tadayon)
+- Fix Qiniu KODO quirks virtualHostStyle is false (zzq)
+
+- SFTP
+
+- Add
--sftp-host-key-algorithms
to allow specifying SSH host key algorithms (Joel)
+- Fix using
--sftp-key-use-agent
and --sftp-key-file
together needing private key file (Arnav Singh)
+- Fix move to allow overwriting existing files (Nick Craig-Wood)
+- Don't stat directories before listing them (Nick Craig-Wood)
+- Don't check remote points to a file if it ends with / (Nick Craig-Wood)
+
+- Sharefile
+
+- Disable streamed transfers as they no longer work (Nick Craig-Wood)
+
+- Smb
+
+- Code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) (albertony)
+
+- Storj
+
+- Fix "uplink: too many requests" errors when uploading to the same file (Nick Craig-Wood)
+- Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+
+- Swift
+
+- Ignore 404 error when deleting an object (Nick Craig-Wood)
+
+- Union
+
+- Implement missing methods (Nick Craig-Wood)
+- Allow errors to be unwrapped for inspection (Nick Craig-Wood)
+
+- Uptobox
+
+- Add
--uptobox-private
flag to make all uploaded files private (Nick Craig-Wood)
+- Fix improper regex (Aaron Gokaslan)
+- Fix Update returning the wrong object (Nick Craig-Wood)
+- Fix rmdir declaring that directories weren't empty (Nick Craig-Wood)
+
+- WebDAV
+
+- nextcloud: Add support for chunked uploads (Paul)
+- Set modtime using propset for owncloud and nextcloud (WeidiDeng)
+- Make pacer minSleep configurable with
--webdav-pacer-min-sleep
(ed)
+- Fix server side copy/move not overwriting (WeidiDeng)
+- Fix modtime on server side copy for owncloud and nextcloud (Nick Craig-Wood)
+
+- Yandex
+
+- Fix 400 Bad Request on transfer failure (Nick Craig-Wood)
+
+- Zoho
+
+- Fix downloads with
Range:
header returning the wrong data (Nick Craig-Wood)
+
+
+v1.62.2 - 2023-03-16
+See commits
+
+- Bug Fixes
+
+- docker volume plugin: Add missing fuse3 dependency (Nick Craig-Wood)
+- docs: Fix size documentation (asdffdsazqqq)
+
+- FTP
+
+- Fix 426 errors on downloads with vsftpd (Lesmiscore)
+
+
+v1.62.1 - 2023-03-15
+See commits
+
+- Bug Fixes
+
+- docker: Add missing fuse3 dependency (cycneuramus)
+- build: Update release docs to be more careful with the tag (Nick Craig-Wood)
+- build: Set Github release to draft while uploading binaries (Nick Craig-Wood)
+
+
v1.62.0 - 2023-03-14
See commits
@@ -32883,8 +34011,8 @@ $ tree /tmp/b
- Calculate hashes for uploads from local disk (Nick Craig-Wood)
-- This allows crypted Jottacloud uploads without using local disk
-- This means crypted s3/b2 uploads will now have hashes
+- This allows encrypted Jottacloud uploads without using local disk
+- This means encrypted s3/b2 uploads will now have hashes
- Added
rclone backend decode
/encode
commands to replicate functionality of cryptdecode
(Anagh Kumar Baranwal)
- Get rid of the unused Cipher interface as it obfuscated the code (Nick Craig-Wood)
@@ -34690,7 +35818,7 @@ $ tree /tmp/b
- Crypt
-- Check the crypted hash of files when uploading for extra data security
+- Check the encrypted hash of files when uploading for extra data security
- Dropbox
@@ -35245,7 +36373,7 @@ $ tree /tmp/b
rcat
- read from standard input and stream upload
tree
- shows a nicely formatted recursive listing
-cryptdecode
- decode crypted file names (thanks ishuah)
+cryptdecode
- decode encrypted file names (thanks ishuah)
config show
- print the config file
config file
- print the config file location
@@ -35688,7 +36816,7 @@ $ tree /tmp/b
- Delete src files which already existed in dst
- Fix deletion of src file when dst file older
-Fix rclone check
on crypted file systems
+Fix rclone check
on encrypted file systems
Make failed uploads not count as "Transferred"
Make sure high level retries show with -q
Use a vendor directory with godep for repeatable builds
@@ -36456,7 +37584,7 @@ $ tree /tmp/b
Project started
Bugs and Limitations
-Limitations
+Limitations
Directory timestamps aren't preserved
Rclone doesn't currently preserve the timestamps of directories. This is because rclone only really considers objects when syncing.
Rclone struggles with millions of files in a directory/bucket
@@ -36544,7 +37672,14 @@ ntpclient -s -h pool.ntp.org
dig www.googleapis.com # resolve using your default DNS
dig www.googleapis.com @8.8.8.8 # resolve with Google's DNS server
If you are using systemd-resolved
(default on Arch Linux), ensure it is at version 233 or higher. Previous releases contain a bug which causes not all domains to be resolved properly.
-Additionally with the GODEBUG=netdns=
environment variable the Go resolver decision can be influenced. This also allows to resolve certain issues with DNS resolution. See the name resolution section in the go docs.
+The Go resolver decision can be influenced with the GODEBUG=netdns=...
environment variable. This also allows resolving certain issues with DNS resolution. On Windows or macOS systems, try forcing use of the internal Go resolver by setting GODEBUG=netdns=go
at runtime. On other systems (Linux, *BSD, etc) try forcing use of the system name resolver by setting GODEBUG=netdns=cgo
(and recompile rclone from source with CGO enabled if necessary). See the name resolution section in the go docs.
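+For example, to force the internal Go resolver for a single run (the remote name is illustrative):
+GODEBUG=netdns=go rclone lsd remote: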
+Failed to start auth webserver on Windows
+Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+...
+yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+This is sometimes caused by the Host Network Service interfering with opening the port on the host.
+A simple solution may be restarting the Host Network Service, e.g. with PowerShell:
+Restart-Service hns
The total size reported in the stats for a sync is wrong and keeps changing
It is likely you have more than 10,000 files that need to be synced. By default, rclone only gets 10,000 files ahead in a sync so as not to use up too much memory. You can change this default with the --max-backlog flag.
Rclone is using too much memory or appears to have a memory leak
@@ -36581,7 +37716,7 @@ THE SOFTWARE.
Nick Craig-Wood nick@craig-wood.com
Contributors
-{{< rem email addresses removed from here need to be addeed to bin/.ignore-emails to make sure update-authors.py doesn't immediately put them back in again.
>}}
+{{< rem email addresses removed from here need to be added to bin/.ignore-emails to make sure update-authors.py doesn't immediately put them back in again.
>}}
Forum
diff --git a/MANUAL.md b/MANUAL.md
index 430ad23bc..47abfb346 100644
--- a/MANUAL.md
+++ b/MANUAL.md
@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
-% Mar 14, 2023
+% Jun 30, 2023
# Rclone syncs your files to cloud storage
@@ -118,6 +118,7 @@ WebDAV or S3, that work out of the box.)
- Dreamhost
- Dropbox
- Enterprise File Fabric
+- Fastmail Files
- FTP
- Google Cloud Storage
- Google Drive
@@ -142,12 +143,15 @@ WebDAV or S3, that work out of the box.)
- Minio
- Nextcloud
- OVH
+- Blomp Cloud Storage
- OpenDrive
- OpenStack Swift
- Oracle Cloud Storage Swift
- Oracle Object Storage
- ownCloud
- pCloud
+- Petabox
+- PikPak
- premiumize.me
- put.io
- QingStor
@@ -337,9 +341,14 @@ feature then you will need to install the third party utility
[Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes pre-installed with the latest versions of Windows. If not, update the [App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package from the Microsoft store.
+To install rclone
```
winget install Rclone.Rclone
```
+To uninstall rclone
+```
+winget uninstall Rclone.Rclone --force
+```
### Chocolatey package manager {#windows-chocolatey}
@@ -449,10 +458,16 @@ Here are some commands tested on an Ubuntu 18.04.3 host:
# config on host at ~/.config/rclone/rclone.conf
# data on host at ~/data
+# add a remote interactively
+docker run --rm -it \
+ --volume ~/.config/rclone:/config/rclone \
+ --user $(id -u):$(id -g) \
+ rclone/rclone \
+ config
+
# make sure the config is ok by listing the remotes
docker run --rm \
--volume ~/.config/rclone:/config/rclone \
- --volume ~/data:/data:shared \
--user $(id -u):$(id -g) \
rclone/rclone \
listremotes
@@ -802,10 +817,11 @@ See the following for detailed instructions for
* [Memory](https://rclone.org/memory/)
* [Microsoft Azure Blob Storage](https://rclone.org/azureblob/)
* [Microsoft OneDrive](https://rclone.org/onedrive/)
- * [OpenStack Swift / Rackspace Cloudfiles / Memset Memstore](https://rclone.org/swift/)
+ * [OpenStack Swift / Rackspace Cloudfiles / Blomp Cloud Storage / Memset Memstore](https://rclone.org/swift/)
* [OpenDrive](https://rclone.org/opendrive/)
* [Oracle Object Storage](https://rclone.org/oracleobjectstorage/)
* [Pcloud](https://rclone.org/pcloud/)
+ * [PikPak](https://rclone.org/pikpak/)
* [premiumize.me](https://rclone.org/premiumizeme/)
* [put.io](https://rclone.org/putio/)
* [QingStor](https://rclone.org/qingstor/)
@@ -1235,7 +1251,7 @@ match. It doesn't alter the source or destination.
For the [crypt](https://rclone.org/crypt/) remote there is a dedicated command,
[cryptcheck](https://rclone.org/commands/rclone_cryptcheck/), that is able to check
-the checksums of the crypted files.
+the checksums of the encrypted files.
If you supply the `--size-only` flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.
@@ -1269,6 +1285,9 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the [--checkers=N](https://rclone.org/docs/#checkers-n)
+option for more information.
+
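+For example, to double the number of parallel checks (the value is
+illustrative):
+
+    rclone check source:path dest:path --checkers 16
+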
```
rclone check source:path dest:path [flags]
@@ -1593,7 +1612,7 @@ recursion.
Some backends do not always provide file sizes, see for example
[Google Photos](https://rclone.org/googlephotos/#size) and
-[Google Drive](https://rclone.org/drive/#limitations-of-google-docs).
+[Google Docs](https://rclone.org/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.
@@ -2066,6 +2085,18 @@ the end and `--offset` and `--count` to print a section in the middle.
Note that if offset is negative it will count from the end, so
`--offset -1 --count 1` is equivalent to `--tail 1`.
+Use the `--separator` flag to print a separator value between files. Be sure to
+shell-escape special characters. For example, to print a newline between
+files, use:
+
+* bash:
+
+ rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
+
+* powershell:
+
+ rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
+
```
rclone cat remote:path [flags]
@@ -2074,12 +2105,13 @@ rclone cat remote:path [flags]
## Options
```
- --count int Only print N characters (default -1)
- --discard Discard the output instead of printing
- --head int Only print the first N characters
- -h, --help help for cat
- --offset int Start printing at offset N (or from end if -ve)
- --tail int Only print the last N characters
+ --count int Only print N characters (default -1)
+ --discard Discard the output instead of printing
+ --head int Only print the first N characters
+ -h, --help help for cat
+ --offset int Start printing at offset N (or from end if -ve)
+ --separator string Separator to use between objects when printing multiple files
+ --tail int Only print the last N characters
```
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
@@ -2126,6 +2158,9 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the [--checkers=N](https://rclone.org/docs/#checkers-n)
+option for more information.
+
```
rclone checksum sumfile src:path [flags]
@@ -2153,12 +2188,13 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone completion
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
## Synopsis
-Generate the autocompletion script for rclone for the specified shell.
-See each sub-command's help for details on how to use the generated script.
+
+Generates a shell completion script for rclone.
+Run with `--help` to list the supported shells.
## Options
@@ -2172,97 +2208,97 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
## SEE ALSO
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
-* [rclone completion bash](https://rclone.org/commands/rclone_completion_bash/) - Generate the autocompletion script for bash
-* [rclone completion fish](https://rclone.org/commands/rclone_completion_fish/) - Generate the autocompletion script for fish
-* [rclone completion powershell](https://rclone.org/commands/rclone_completion_powershell/) - Generate the autocompletion script for powershell
-* [rclone completion zsh](https://rclone.org/commands/rclone_completion_zsh/) - Generate the autocompletion script for zsh
+* [rclone completion bash](https://rclone.org/commands/rclone_completion_bash/) - Output bash completion script for rclone.
+* [rclone completion fish](https://rclone.org/commands/rclone_completion_fish/) - Output fish completion script for rclone.
+* [rclone completion zsh](https://rclone.org/commands/rclone_completion_zsh/) - Output zsh completion script for rclone.
# rclone completion bash
-Generate the autocompletion script for bash
+Output bash completion script for rclone.
## Synopsis
-Generate the autocompletion script for the bash shell.
-This script depends on the 'bash-completion' package.
-If it is not installed already, you can install it via your OS's package manager.
+Generates a bash shell autocompletion script for rclone.
-To load completions in your current shell session:
+This writes to /etc/bash_completion.d/rclone by default so will
+probably need to be run with sudo or as root, e.g.
- source <(rclone completion bash)
+    sudo rclone completion bash
-To load completions for every new session, execute once:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-### Linux:
+ . /etc/bash_completion
- rclone completion bash > /etc/bash_completion.d/rclone
+If you supply a command line argument the script will be written
+there.
-### macOS:
-
- rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
-
-You will need to start a new shell for this setup to take effect.
+If output_file is "-", then the output will be written to stdout.
```
-rclone completion bash
+rclone completion bash [output_file] [flags]
```
## Options
```
- -h, --help help for bash
- --no-descriptions disable completion descriptions
+ -h, --help help for bash
```
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](https://rclone.org/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell.
# rclone completion fish
-Generate the autocompletion script for fish
+Output fish completion script for rclone.
## Synopsis
-Generate the autocompletion script for the fish shell.
-To load completions in your current shell session:
+Generates a fish autocompletion script for rclone.
- rclone completion fish | source
+This writes to /etc/fish/completions/rclone.fish by default so will
+probably need to be run with sudo or as root, e.g.
-To load completions for every new session, execute once:
+    sudo rclone completion fish
- rclone completion fish > ~/.config/fish/completions/rclone.fish
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-You will need to start a new shell for this setup to take effect.
+ . /etc/fish/completions/rclone.fish
+
+If you supply a command line argument the script will be written
+there.
+
+If output_file is "-", then the output will be written to stdout.
```
-rclone completion fish [flags]
+rclone completion fish [output_file] [flags]
```
## Options
```
- -h, --help help for fish
- --no-descriptions disable completion descriptions
+ -h, --help help for fish
```
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](https://rclone.org/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell.
# rclone completion powershell
Generate the autocompletion script for powershell
-## Synopsis
+# Synopsis
Generate the autocompletion script for powershell.
@@ -2278,7 +2314,7 @@ to your powershell profile.
rclone completion powershell [flags]
```
-## Options
+# Options
```
-h, --help help for powershell
@@ -2287,56 +2323,50 @@ rclone completion powershell [flags]
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone completion](https://rclone.org/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
# rclone completion zsh
-Generate the autocompletion script for zsh
+Output zsh completion script for rclone.
## Synopsis
-Generate the autocompletion script for the zsh shell.
-If shell completion is not already enabled in your environment you will need
-to enable it. You can execute the following once:
+Generates a zsh autocompletion script for rclone.
- echo "autoload -U compinit; compinit" >> ~/.zshrc
+This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
+probably need to be run with sudo or as root, e.g.
-To load completions in your current shell session:
+    sudo rclone completion zsh
- source <(rclone completion zsh); compdef _rclone rclone
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-To load completions for every new session, execute once:
+ autoload -U compinit && compinit
-### Linux:
+If you supply a command line argument the script will be written
+there.
- rclone completion zsh > "${fpath[1]}/_rclone"
-
-### macOS:
-
- rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
-
-You will need to start a new shell for this setup to take effect.
+If output_file is "-", then the output will be written to stdout.
```
-rclone completion zsh [flags]
+rclone completion zsh [output_file] [flags]
```
## Options
```
- -h, --help help for zsh
- --no-descriptions disable completion descriptions
+ -h, --help help for zsh
```
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](https://rclone.org/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell.
# rclone config create
@@ -2991,14 +3021,14 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone cryptcheck
-Cryptcheck checks the integrity of a crypted remote.
+Cryptcheck checks the integrity of an encrypted remote.
## Synopsis
rclone cryptcheck checks a remote against a [crypted](https://rclone.org/crypt/) remote.
This is the equivalent of running rclone [check](https://rclone.org/commands/rclone_check/),
-but able to check the checksums of the crypted remote.
+but able to check the checksums of the encrypted remote.
For it to work the underlying remote of the `cryptedremote` must support
some kind of checksum.
@@ -3040,6 +3070,9 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the [--checkers=N](https://rclone.org/docs/#checkers-n)
+option for more information.
+
```
rclone cryptcheck remote:path cryptedremote:path [flags]
@@ -3135,14 +3168,14 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
Output completion script for a given shell.
-## Synopsis
+# Synopsis
Generates a shell completion script for rclone.
Run with `--help` to list the supported shells.
-## Options
+# Options
```
-h, --help help for genautocomplete
@@ -3150,7 +3183,7 @@ Run with `--help` to list the supported shells.
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone genautocomplete bash](https://rclone.org/commands/rclone_genautocomplete_bash/) - Output bash completion script for rclone.
@@ -3161,7 +3194,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
Output bash completion script for rclone.
-## Synopsis
+# Synopsis
Generates a bash shell autocompletion script for rclone.
@@ -3186,7 +3219,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete bash [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for bash
@@ -3194,7 +3227,7 @@ rclone genautocomplete bash [output_file] [flags]
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell.
@@ -3202,7 +3235,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
Output fish completion script for rclone.
-## Synopsis
+# Synopsis
Generates a fish autocompletion script for rclone.
@@ -3227,7 +3260,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete fish [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for fish
@@ -3235,7 +3268,7 @@ rclone genautocomplete fish [output_file] [flags]
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell.
@@ -3243,7 +3276,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
Output zsh completion script for rclone.
-## Synopsis
+# Synopsis
Generates a zsh autocompletion script for rclone.
@@ -3268,7 +3301,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete zsh [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for zsh
@@ -3276,7 +3309,7 @@ rclone genautocomplete zsh [output_file] [flags]
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell.
@@ -3421,7 +3454,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone listremotes
-List all the remotes in the config file.
+List all the remotes in the config file and defined in environment variables.
## Synopsis
@@ -3994,6 +4027,17 @@ Mounting on macOS can be done either via [macFUSE](https://osxfuse.github.io/)
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.
+### macFUSE Notes
+
+If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
+the website, rclone will locate the macFUSE libraries without any further intervention.
+If, however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
+the following additional steps are required.
+
+ sudo mkdir /usr/local/lib
+ cd /usr/local/lib
+ sudo ln -s /opt/local/lib/libfuse.2.dylib
+
### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are current as
@@ -4130,20 +4174,19 @@ or create systemd mount units:
```
# /etc/systemd/system/mnt-data.mount
[Unit]
-After=network-online.target
+Description=Mount for /mnt/data
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
-Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
```
optionally accompanied by systemd automount unit
```
# /etc/systemd/system/mnt-data.automount
[Unit]
-After=network-online.target
-Before=remote-fs.target
+Description=AutoMount for /mnt/data
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
@@ -4256,7 +4299,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -4279,7 +4322,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
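+
+As an illustrative example, a mount combining these eviction controls
+might look like this (values are hypothetical):
+
+    rclone mount remote: /mnt/remote --vfs-cache-mode full \
+        --vfs-cache-max-size 10G --vfs-cache-max-age 2h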
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -4524,6 +4578,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -4535,7 +4590,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -4856,10 +4911,11 @@ and actually stream it, even if remote backend doesn't support streaming.
size of the stream is different in length to the `--size` passed in
then the transfer will likely fail.
-Note that the upload can also not be retried because the data is
-not kept around until the upload succeeds. If you need to transfer
-a lot of data, you're better off caching locally and then
-`rclone move` it to the destination.
+Note that the upload cannot be retried because the data is not stored.
+If the backend supports multipart uploading then individual chunks can
+be retried. If you need to transfer a lot of data, you may be better
+off caching it locally and then using `rclone move` to send it to the
+destination, which can use retries.
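+
+If you need retries for a large transfer, a pattern like the following
+may work better (paths are illustrative):
+
+    some_command > /tmp/output.bin
+    rclone move /tmp/output.bin remote:path/
+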
```
rclone rcat remote:path [flags]
@@ -4897,54 +4953,54 @@ See the [rc documentation](https://rclone.org/rc/) for more info on the rc flags
## Server options
-Use `--addr` to specify which IP address and port the server should
-listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all
+Use `--rc-addr` to specify which IP address and port the server should
+listen on, eg `--rc-addr 1.2.3.4:8000` or `--rc-addr :8080` to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
-If you set `--addr` to listen on a public or LAN accessible IP address
+If you set `--rc-addr` to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
You can use a unix socket by setting the url to `unix:///path/to/socket`
or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.
-`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+`--rc-addr` may be repeated to listen on multiple IPs/ports/sockets.
-`--server-read-timeout` and `--server-write-timeout` can be used to
+`--rc-server-read-timeout` and `--rc-server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
for a transfer.
-`--max-header-bytes` controls the maximum number of bytes the server will
+`--rc-max-header-bytes` controls the maximum number of bytes the server will
accept in the HTTP header.
-`--baseurl` controls the URL prefix that rclone serves from. By default
-rclone will serve from the root. If you used `--baseurl "/rclone"` then
+`--rc-baseurl` controls the URL prefix that rclone serves from. By default
+rclone will serve from the root. If you used `--rc-baseurl "/rclone"` then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve. Rclone automatically
-inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`,
-`--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated
+inserts leading and trailing "/" on `--rc-baseurl`, so `--rc-baseurl "rclone"`,
+`--rc-baseurl "/rclone"` and `--rc-baseurl "/rclone/"` are all treated
identically.
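+
+For example, to serve the remote control API on all interfaces under a
+URL prefix (values are illustrative):
+
+    rclone rcd --rc-addr :8080 --rc-baseurl /rclone
+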
### TLS (SSL)
By default this will serve over http. If you want you can serve over
-https. You will need to supply the `--cert` and `--key` flags.
+https. You will need to supply the `--rc-cert` and `--rc-key` flags.
If you wish to do client side certificate validation then you will need to
-supply `--client-ca` also.
+supply `--rc-client-ca` also.
-`--cert` should be a either a PEM encoded certificate or a concatenation
-of that with the CA certificate. `--key` should be the PEM encoded
-private key and `--client-ca` should be the PEM encoded client
+`--rc-cert` should be either a PEM encoded certificate or a concatenation
+of that with the CA certificate. `--rc-key` should be the PEM encoded
+private key and `--rc-client-ca` should be the PEM encoded client
certificate authority certificate.
---min-tls-version is minimum TLS version that is acceptable. Valid
+`--rc-min-tls-version` is the minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").
### Template
-`--template` allows a user to specify a custom markup template for HTTP
+`--rc-template` allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup
to be used within the template to serve pages:
@@ -4972,9 +5028,13 @@ to be used within the template to server pages:
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
-set a single username and password with the `--user` and `--pass` flags.
+set a single username and password with the `--rc-user` and `--rc-pass` flags.
-Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--rc-client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
+Use `--rc-htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -4986,9 +5046,9 @@ To create an htpasswd file:
The password file can be updated while rclone is running.
-Use `--realm` to set the authentication realm.
+Use `--rc-realm` to set the authentication realm.
-Use `--salt` to change the password hashing salt from the default.
+Use `--rc-salt` to change the password hashing salt from the default.
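+
+A minimal authenticated server might be started like this (credentials
+are illustrative):
+
+    rclone rcd --rc-user alice --rc-pass s3cret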
```
@@ -5268,7 +5328,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -5291,7 +5351,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -5535,7 +5606,7 @@ rclone serve dlna remote:path [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -5679,7 +5750,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -5702,7 +5773,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -5949,6 +6031,7 @@ rclone serve docker [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -5963,7 +6046,7 @@ rclone serve docker [flags]
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -6090,7 +6173,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -6113,7 +6196,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -6441,7 +6535,7 @@ rclone serve ftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -6560,6 +6654,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -6652,7 +6750,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -6675,7 +6773,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -7011,7 +7120,7 @@ rclone serve http remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7175,6 +7284,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -7279,7 +7392,7 @@ which can lead to "corrupted on transfer" errors. This is the case because
the client chooses indiscriminately which server to send commands to while
the servers all have different views of the state of the filing system.
-The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from beeing
+The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being
used. Omitting "restrict" and using `--sftp-path-override` to enable
checksumming is possible but less secure and you could use the SFTP server
provided by OpenSSH in this case.
@@ -7361,7 +7474,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -7384,7 +7497,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -7712,7 +7836,7 @@ rclone serve sftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7860,6 +7984,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -7952,7 +8080,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -7975,7 +8103,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that haven't
+been accessed for the longest. This cache flushing strategy is
+efficient and more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache
+after the set time since last access has passed. With the default value
+of 1 hour, files that haven't been accessed for 1 hour are evicted.
+When a cached file is accessed the timer is reset to 0 and eviction
+waits for another hour. Specify the time with
+standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -8313,7 +8452,7 @@ rclone serve webdav remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -8389,7 +8528,7 @@ Run a test command
Rclone test is used to run test commands.
-Select which test comand you want with the subcommand, eg
+Select which test command you want with the subcommand, eg
rclone test memory remote:
@@ -9399,7 +9538,7 @@ they are incorrect as it would normally.
### --color WHEN ###
-Specifiy when colors (and other ANSI codes) should be added to the output.
+Specify when colors (and other ANSI codes) should be added to the output.
`AUTO` (default) only allows ANSI codes when the output is a terminal
@@ -9506,6 +9645,22 @@ You may also choose to [encrypt](#configuration-encryption) the file.
When token-based authentication are used, the configuration file
must be writable, because rclone needs to update the tokens inside it.
+To reduce risk of corrupting an existing configuration file, rclone
+will not write directly to it when saving changes. Instead it will
+first write to a new, temporary, file. If a configuration file already
+existed, it will (on Unix systems) try to mirror its permissions to
+the new file. Then it will rename the existing file to a temporary
+name as backup. Next, rclone will rename the new file to the correct name,
+before finally cleaning up by deleting the backup file.
+
+If the configuration file path used by rclone is a symbolic link, then
+this will be evaluated and rclone will write to the resolved path, instead
+of overwriting the symbolic link. Temporary files used in the process
+(described above) will be written to the same parent directory as that
+of the resolved configuration file, but if this directory is also a
+symbolic link it will not be resolved and the temporary files will be
+written to the location of the directory symbolic link.
+
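+As an illustrative sketch of the save sequence (not actual rclone
+internals):
+
+    write   rclone.conf.tmp                  # new config to temp file
+    rename  rclone.conf -> rclone.conf.bak   # keep old file as backup
+    rename  rclone.conf.tmp -> rclone.conf   # swap new file into place
+    delete  rclone.conf.bak                  # clean up the backup
+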
### --contimeout=TIME ###
Set the connection timeout. This should be in go time format which
@@ -9534,6 +9689,18 @@ Mode to run dedupe command in. One of `interactive`, `skip`, `first`,
`newest`, `oldest`, `rename`. The default is `interactive`.
See the dedupe command for more information as to what these options mean.
+### --default-time TIME ###
+
+If a file or directory does not have a modification time rclone can read
+then rclone will display this fixed time instead.
+
+The default is `2000-01-01 00:00:00 UTC`. This can be configured in
+any of the ways shown in [the time or duration options](#time-option).
+
+For example `--default-time 2020-06-01` to set the default time to the
+1st of June 2020 or `--default-time 0s` to set the default time to the
+time rclone started up.
+
### --disable FEATURE,FEATURE,... ###
This disables a comma separated list of optional features. For example
@@ -9547,10 +9714,24 @@ To see a list of which features can be disabled use:
--disable help
+The features a remote has can be seen in JSON format with:
+
+ rclone backend features remote:
+
See the overview [features](https://rclone.org/overview/#features) and
[optional features](https://rclone.org/overview/#optional-features) to get an idea of
which feature does what.
+Note that some features can be forced to `true` or `false` if they are
+boolean feature flags, by prefixing them with `!`. For example the
+`CaseInsensitive` feature can be forced to `false` with `--disable CaseInsensitive`
+and forced to `true` with `--disable '!CaseInsensitive'`. In general
+it isn't a good idea to do this, but it may be useful in extremis.
+
+(Note that `!` is a special character in most shells, so you will
+need to escape it with single quotes or a backslash on unix-like
+platforms.)
+
This flag can be useful for debugging and in exceptional circumstances
(e.g. Google Drive limiting the total volume of Server Side Copies to
100 GiB/day).
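+
+For example, to disable server-side copy and move for one run (feature
+names as listed by `--disable help`):
+
+    rclone sync source:path dest:path --disable move,copy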
@@ -9773,6 +9954,49 @@ This can be useful as an additional layer of protection for immutable
or append-only data sets (notably backup archives), where modification
implies corruption and should not be propagated.
+### --inplace {#inplace}
+
+The `--inplace` flag changes the behaviour of rclone when uploading
+files to some backends (backends with the `PartialUploads` feature
+flag set) such as:
+
+- local
+- ftp
+- sftp
+
+Without `--inplace` (the default) rclone will first upload to a
+temporary file with an extension like this, where `XXXXXX` represents a
+random string.
+
+ original-file-name.XXXXXX.partial
+
+(rclone will make sure the final name is no longer than 100 characters
+by truncating the `original-file-name` part if necessary).
+
+When the upload is complete, rclone will rename the `.partial` file to
+the correct name, overwriting any existing file at that point. If the
+upload fails then the `.partial` file will be deleted.
+
+This prevents other users of the backend from seeing partially
+uploaded files in their new names and prevents overwriting the old
+file until the new one is completely uploaded.
+
+If the `--inplace` flag is supplied, rclone will upload directly to
+the final name without creating a `.partial` file.
+
+This means that an incomplete file will be visible in the directory
+listings while the upload is in progress and any existing files will
+be overwritten as soon as the upload starts. If the transfer fails
+then the file will be deleted. This can cause data loss of the
+existing file if the transfer fails.
+
+Note that on the local file system if you don't use `--inplace`, hard
+links (Unix only) will be broken. And if you do use `--inplace`, you
+won't be able to update in-use executables.
+
+Note also that versions of rclone prior to v1.63.0 behave as if the
+`--inplace` flag is always supplied.
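+
+For example, to upload a file directly to its final name on an SFTP
+remote (remote and path names are illustrative):
+
+    rclone copyto --inplace /path/to/file.bin sftp-remote:dir/file.bin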
+
### -i, --interactive {#interactive}
This flag can be used to tell rclone that you wish a manual
@@ -9984,6 +10208,25 @@ if you are reading and writing to an OS X filing system this will be
This command line flag allows you to override that computed default.
+### --multi-thread-write-buffer-size=SIZE ###
+
+When downloading with multiple threads, rclone will buffer SIZE bytes in
+memory before writing to disk for each thread.
+
+This can improve performance if the underlying filesystem does not deal
+well with a lot of small writes in different positions of the file, so
+if you see downloads being limited by disk write speed, you might want
+to experiment with different values. Especially for magnetic drives and
+remote file systems a higher value can be useful.
+
+Nevertheless, the default of `128k` should be fine for almost all use
+cases, so before changing it ensure that the network is not really your
+bottleneck.
+
+As a final hint, size is not the only factor: block size (or similar
+concept) can have an impact. In one case, we observed that exact
+multiples of 16k performed much better than other values.
+
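+For example, to try a larger, 16k-aligned buffer when downloading big
+files to a magnetic disk (the value is illustrative):
+
+    rclone copy remote:path /mnt/disk --multi-thread-write-buffer-size 256k
+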
### --multi-thread-cutoff=SIZE ###
When downloading files to the local backend above this size, rclone
@@ -10385,6 +10628,12 @@ would be backed up to `file.txt-2019-01-01` and with the flag it would
be backed up to `file-2019-01-01.txt`. This can be helpful to make
sure the suffixed files can still be opened.
+If a file has two (or more) extensions and the second (or subsequent)
+extension is recognised as a valid mime type, then the suffix will go
+before that extension. So `file.tar.gz` would be backed up to
+`file-2019-01-01.tar.gz` whereas `file.badextension.gz` would be
+backed up to `file.badextension-2019-01-01.gz`.
+
### --syslog ###
On capable OSes (not Windows or Plan9) send all log output to syslog.
@@ -11402,7 +11651,7 @@ Which will match a directory called `start` with a file called
`end.jpg` in it as the `.*` will match `/` characters.
Note that you can use `-vv --dump filters` to show the filter patterns
-in regexp format - rclone implements the glob patters by transforming
+in regexp format - rclone implements the glob patterns by transforming
them into regular expressions.
## Filter pattern examples {#examples}
@@ -12725,7 +12974,7 @@ See the [config dump](https://rclone.org/commands/rclone_config_dump/) command f
**Authentication is required for this call.**
-### config/listremotes: Lists the remotes in the config file. {#config-listremotes}
+### config/listremotes: Lists the remotes in the config file and defined in environment variables. {#config-listremotes}
Returns
- remotes - array of remote names
@@ -13299,9 +13548,9 @@ See the [cleanup](https://rclone.org/commands/rclone_cleanup/) command for more
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
**Authentication is required for this call.**
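+
+For example, using the command line client (remote and file names are
+illustrative):
+
+    rclone rc operations/copyfile srcFs=drive: srcRemote=file.txt dstFs=drive2: dstRemote=file2.txt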
@@ -13496,9 +13745,9 @@ See the [mkdir](https://rclone.org/commands/rclone_mkdir/) command for more info
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
**Authentication is required for this call.**
@@ -14220,6 +14469,7 @@ Here is an overview of the major features of each cloud storage system.
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
+| PikPak | MD5 | R | No | No | R | - |
| premiumize.me | - | - | Yes | No | R | - |
| put.io | CRC-32 | R/W | No | Yes | R | - |
| QingStor | MD5 | - ⁹ | No | No | R/W | - |
@@ -14244,9 +14494,9 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.
² SFTP supports checksums if the same login has shell access and
`md5sum` or `sha1sum` as well as `echo` are in the remote's PATH.
-³ WebDAV supports hashes when used with Owncloud and Nextcloud only.
+³ WebDAV supports hashes when used with Fastmail Files, Owncloud and Nextcloud only.
-⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
+⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and Nextcloud only.
⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.
@@ -14648,7 +14898,7 @@ upon backend-specific capabilities.
| Amazon S3 (or S3 compatible) | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No |
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes ‡‡ | No | Yes | Yes | Yes | Yes |
-| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | Yes | No | No | Yes |
+| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | Yes |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No | No | Yes |
@@ -14670,6 +14920,7 @@ upon backend-specific capabilities.
| OpenStack Swift | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
+| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | Yes | Yes |
| QingStor | No | Yes | No | No | Yes | Yes | No | No | No | No |
@@ -14782,166 +15033,169 @@ split into two groups, non backend and backend flags.
These flags are available for every command.
```
- --ask-password Allow prompt for password for encrypted configuration (default true)
- --auto-confirm If enabled, do not request console confirmation
- --backup-dir string Make backups into hierarchy based in DIR
- --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
- --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
- --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --ca-cert stringArray CA certificate used to verify servers
- --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
- --check-first Do all the checks before starting transfers
- --checkers int Number of checkers to run in parallel (default 8)
- -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
- --client-cert string Client SSL certificate (PEM) for mutual TLS auth
- --client-key string Client SSL private key (PEM) for mutual TLS auth
- --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
- --config string Config file (default "$HOME/.config/rclone/rclone.conf")
- --contimeout Duration Connect timeout (default 1m0s)
- --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
- --cpuprofile string Write cpu profile to file
- --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
- --delete-after When synchronizing, delete files on destination after transferring (default)
- --delete-before When synchronizing, delete files on destination before transferring
- --delete-during When synchronizing, delete files during transfer
- --delete-excluded Delete files on dest excluded from sync
- --disable string Disable a comma separated list of features (use --disable help to see a list)
- --disable-http-keep-alives                   Disable HTTP keep-alives and use each connection once
- --disable-http2 Disable HTTP/2 in the global transport
- -n, --dry-run Do a trial run with no permanent changes
- --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
- --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
- --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
- --dump-headers Dump HTTP headers - may contain sensitive info
- --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
- --fast-list Use recursive list if available; uses more memory but fewer transactions
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
- --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
- --header stringArray Set HTTP header for all transactions
- --header-download stringArray Set HTTP header for download transactions
- --header-upload stringArray Set HTTP header for upload transactions
- --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
- --ignore-case Ignore case in filters (case insensitive)
- --ignore-case-sync Ignore case when synchronizing
- --ignore-checksum Skip post copy check of checksums
- --ignore-errors Delete even if there are I/O errors
- --ignore-existing Skip all files that exist on destination
- --ignore-size                                Ignore size when skipping; use mod-time or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
- --immutable Do not modify files, fail if existing files have been modified
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- -i, --interactive Enable interactive mode
- --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
- --log-file string Log everything to this file
- --log-format string Comma separated list of log format options (default "date,time")
- --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
- --log-systemd Activate systemd integration for the logger
- --low-level-retries int Number of low level retries to do (default 10)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
- --max-delete int When synchronizing, limit the number of deletes (default -1)
- --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
- --max-transfer SizeSuffix Maximum size of data to transfer (default off)
- --memprofile string Write memory profile to file
- -M, --metadata If set, preserve metadata when copying objects
- --metadata-exclude stringArray               Exclude metadata matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray               Include metadata matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --metadata-set stringArray Add metadata key=value when uploading
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
- --modify-window Duration Max time diff to be considered the same (default 1ns)
- --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
- --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
- --no-check-certificate Do not verify the server SSL certificate (insecure)
- --no-check-dest Don't check the destination, copy regardless
- --no-console Hide console window (supported on Windows only)
- --no-gzip-encoding Don't set Accept-Encoding: gzip
- --no-traverse Don't traverse destination file system on copy
- --no-unicode-normalization Don't normalize unicode characters in filenames
- --no-update-modtime Don't update destination mod-time if files identical
- --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
- --password-command SpaceSepList Command for supplying password for encrypted configuration
- -P, --progress Show progress during transfer
- --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
- -q, --quiet Print as little stuff as possible
- --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
- --rc-allow-origin string Set the allowed origin for CORS
- --rc-baseurl string Prefix for URLs - leave blank for root
- --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
- --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
- --rc-files string Path to local files to serve on the HTTP server
- --rc-htpasswd string A htpasswd file - if not provided no authentication is done
- --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
- --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
- --rc-key string TLS PEM Private key
- --rc-max-header-bytes int Maximum size of request header (default 4096)
- --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --rc-no-auth Don't require auth for certain methods
- --rc-pass string Password for authentication
- --rc-realm string Realm for authentication
- --rc-salt string Password hashing salt (default "dlPL2MqE")
- --rc-serve Enable the serving of remote objects
- --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --rc-template string User-specified template
- --rc-user string User name for authentication
- --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
- --rc-web-gui Launch WebGUI on localhost
- --rc-web-gui-force-update Force update to latest version of web gui
- --rc-web-gui-no-open-browser Don't open the browser automatically
- --rc-web-gui-update Check and update to latest version of web gui
- --refresh-times Refresh the modtime of remote files
- --retries int Retry operations this many times if they fail (default 3)
- --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
- --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
- --size-only Skip based on size only, not mod-time or checksum
- --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
- --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
- --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
- --stats-one-line Make the stats fit on one line
- --stats-one-line-date Enable --stats-one-line and add current date/time prefix
- --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
- --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
- --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
- --suffix string Suffix to add to changed files
- --suffix-keep-extension Preserve the extension when using --suffix
- --syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
- --temp-dir string Directory rclone will use for temporary files (default "/tmp")
- --timeout Duration IO idle timeout (default 5m0s)
- --tpslimit float Limit HTTP transactions per second to this
- --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
- --track-renames When synchronizing, track file renames and do a server-side move if possible
- --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
- --transfers int Number of file transfers to run in parallel (default 4)
- -u, --update Skip files that are newer on the destination
- --use-cookies Enable session cookiejar
- --use-json-log Use json log format
- --use-mmap Use mmap allocator (see docs)
- --use-server-modtime Use server modified time instead of object metadata
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.62.0")
- -v, --verbose count Print lots more stuff (repeat for more)
+ --ask-password Allow prompt for password for encrypted configuration (default true)
+ --auto-confirm If enabled, do not request console confirmation
+ --backup-dir string Make backups into hierarchy based in DIR
+ --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
+ --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
+ --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --ca-cert stringArray CA certificate used to verify servers
+ --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
+ --check-first Do all the checks before starting transfers
+ --checkers int Number of checkers to run in parallel (default 8)
+ -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
+ --client-cert string Client SSL certificate (PEM) for mutual TLS auth
+ --client-key string Client SSL private key (PEM) for mutual TLS auth
+ --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
+ --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ --config string Config file (default "$HOME/.config/rclone/rclone.conf")
+ --contimeout Duration Connect timeout (default 1m0s)
+ --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
+ --cpuprofile string Write cpu profile to file
+ --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
+ --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
+ --delete-after When synchronizing, delete files on destination after transferring (default)
+ --delete-before When synchronizing, delete files on destination before transferring
+ --delete-during When synchronizing, delete files during transfer
+ --delete-excluded Delete files on dest excluded from sync
+ --disable string Disable a comma separated list of features (use --disable help to see a list)
+ --disable-http-keep-alives                   Disable HTTP keep-alives and use each connection once
+ --disable-http2 Disable HTTP/2 in the global transport
+ -n, --dry-run Do a trial run with no permanent changes
+ --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
+ --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
+ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
+ --dump-headers Dump HTTP headers - may contain sensitive info
+ --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
+ --fast-list Use recursive list if available; uses more memory but fewer transactions
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
+ --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
+ --header stringArray Set HTTP header for all transactions
+ --header-download stringArray Set HTTP header for download transactions
+ --header-upload stringArray Set HTTP header for upload transactions
+ --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
+ --ignore-case Ignore case in filters (case insensitive)
+ --ignore-case-sync Ignore case when synchronizing
+ --ignore-checksum Skip post copy check of checksums
+ --ignore-errors Delete even if there are I/O errors
+ --ignore-existing Skip all files that exist on destination
+ --ignore-size                                Ignore size when skipping; use mod-time or checksum
+ -I, --ignore-times Don't skip files that match size and time - transfer all files
+ --immutable Do not modify files, fail if existing files have been modified
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --inplace Download directly to destination file instead of atomic download to temp/rename
+ -i, --interactive Enable interactive mode
+ --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
+ --log-file string Log everything to this file
+ --log-format string Comma separated list of log format options (default "date,time")
+ --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
+ --log-systemd Activate systemd integration for the logger
+ --low-level-retries int Number of low level retries to do (default 10)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
+ --max-delete int When synchronizing, limit the number of deletes (default -1)
+ --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
+ --max-transfer SizeSuffix Maximum size of data to transfer (default off)
+ --memprofile string Write memory profile to file
+ -M, --metadata If set, preserve metadata when copying objects
+ --metadata-exclude stringArray               Exclude metadata matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray               Include metadata matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --metadata-set stringArray Add metadata key=value when uploading
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+ --modify-window Duration Max time diff to be considered the same (default 1ns)
+ --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
+ --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
+ --multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
+ --no-check-certificate Do not verify the server SSL certificate (insecure)
+ --no-check-dest Don't check the destination, copy regardless
+ --no-console Hide console window (supported on Windows only)
+ --no-gzip-encoding Don't set Accept-Encoding: gzip
+ --no-traverse Don't traverse destination file system on copy
+ --no-unicode-normalization Don't normalize unicode characters in filenames
+ --no-update-modtime Don't update destination mod-time if files identical
+ --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
+ --password-command SpaceSepList Command for supplying password for encrypted configuration
+ -P, --progress Show progress during transfer
+ --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
+ -q, --quiet Print as little stuff as possible
+ --rc Enable the remote control server
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-allow-origin string Set the allowed origin for CORS
+ --rc-baseurl string Prefix for URLs - leave blank for root
+ --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --rc-client-ca string Client certificate authority to verify clients with
+ --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-files string Path to local files to serve on the HTTP server
+ --rc-htpasswd string A htpasswd file - if not provided no authentication is done
+ --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
+ --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
+ --rc-key string TLS PEM Private key
+ --rc-max-header-bytes int Maximum size of request header (default 4096)
+ --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --rc-no-auth Don't require auth for certain methods
+ --rc-pass string Password for authentication
+ --rc-realm string Realm for authentication
+ --rc-salt string Password hashing salt (default "dlPL2MqE")
+ --rc-serve Enable the serving of remote objects
+ --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --rc-template string User-specified template
+ --rc-user string User name for authentication
+ --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
+ --rc-web-gui Launch WebGUI on localhost
+ --rc-web-gui-force-update Force update to latest version of web gui
+ --rc-web-gui-no-open-browser Don't open the browser automatically
+ --rc-web-gui-update Check and update to latest version of web gui
+ --refresh-times Refresh the modtime of remote files
+ --retries int Retry operations this many times if they fail (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
+ --size-only Skip based on size only, not mod-time or checksum
+ --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
+ --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
+ --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
+ --stats-one-line Make the stats fit on one line
+ --stats-one-line-date Enable --stats-one-line and add current date/time prefix
+ --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
+ --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
+ --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+ --suffix string Suffix to add to changed files
+ --suffix-keep-extension Preserve the extension when using --suffix
+ --syslog Use Syslog for logging
+ --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
+ --temp-dir string Directory rclone will use for temporary files (default "/tmp")
+ --timeout Duration IO idle timeout (default 5m0s)
+ --tpslimit float Limit HTTP transactions per second to this
+ --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
+ --track-renames When synchronizing, track file renames and do a server-side move if possible
+ --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
+ --transfers int Number of file transfers to run in parallel (default 4)
+ -u, --update Skip files that are newer on the destination
+ --use-cookies Enable session cookiejar
+ --use-json-log Use json log format
+ --use-mmap Use mmap allocator (see docs)
+ --use-server-modtime Use server modified time instead of object metadata
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
+ -v, --verbose count Print lots more stuff (repeat for more)
```
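A minimal sketch of how these flags combine on the command line (the paths and the remote name `remote:` are illustrative):

```
rclone sync /home/user/docs remote:backup \
    --transfers 8 \
    --checkers 16 \
    --bwlimit 10M \
    --dry-run --progress
```

Every flag also has an environment variable equivalent made by stripping the leading `--`, changing `-` to `_`, upper-casing and prepending `RCLONE_`, so `RCLONE_TRANSFERS=8` is the same as `--transfers 8`.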
## Backend Flags
@@ -14950,554 +15204,581 @@ These flags are available for every command. They control the backends
and may be set in the config file.
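For example, a backend option can be given as a flag, as a key in the config file, or as an environment variable; the three forms below are equivalent (the remote name `gdrive` is illustrative):

```
# On the command line:
rclone copy gdrive:src /tmp/dst --drive-chunk-size 64M

# In rclone.conf (the backend prefix is dropped from the key):
[gdrive]
type = drive
chunk_size = 64M

# As an environment variable (applies to all drive remotes):
export RCLONE_DRIVE_CHUNK_SIZE=64M
```

The full set of backend flags follows.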
```
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
- --alias-remote string Remote or path to alias
- --azureblob-access-tier string Access tier of blob: hot, cool or archive
- --azureblob-account string Azure Storage Account Name
- --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
- --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
- --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
- --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
- --azureblob-client-id string The ID of the client in use
- --azureblob-client-secret string One of the service principal's client secrets
- --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
- --azureblob-disable-checksum Don't store MD5 checksum with object metadata
- --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
- --azureblob-endpoint string Endpoint for the service
- --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
- --azureblob-key string Storage Account Shared Key
- --azureblob-list-chunk int Size of blob list (default 5000)
- --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
- --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-no-check-container If set, don't attempt to check the container exists or create it
- --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
- --azureblob-password string The user's password (obscured)
- --azureblob-public-access string Public access level of a container: blob or container
- --azureblob-sas-url string SAS URL for container level access only
- --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
- --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
- --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
- --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
- --azureblob-use-emulator Uses local storage emulator if provided as 'true'
- --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
- --azureblob-username string User name (usually an email address)
- --b2-account string Account ID or Application Key ID
- --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
- --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
- --b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
- --b2-download-url string Custom endpoint for downloads
- --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --b2-endpoint string Endpoint for the service
- --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
- --b2-key string Application Key
- --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
- --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --b2-version-at Time Show file versions as they were at the specified time (default off)
- --b2-versions Include old versions in directory listings
- --box-access-token string Box App Primary Access Token
- --box-auth-url string Auth server URL
- --box-box-config-file string Box App config.json location
- --box-box-sub-type string (default "user")
- --box-client-id string OAuth Client Id
- --box-client-secret string OAuth Client Secret
- --box-commit-retries int Max number of times to try committing a multipart file (default 100)
- --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
- --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
- --box-owned-by string Only show items owned by the login (email address) passed in
- --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
- --box-token string OAuth Access Token as a JSON blob
- --box-token-url string Token server url
- --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
- --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
- --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
- --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
- --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
- --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
- --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
- --cache-db-purge Clear all the cached data for this remote on start
- --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
- --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
- --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
- --cache-plex-password string The password of the Plex user (obscured)
- --cache-plex-url string The URL of the Plex server
- --cache-plex-username string The username of the Plex user
- --cache-read-retries int How many times to retry a read from a cache storage (default 10)
- --cache-remote string Remote to cache
- --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
- --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
- --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
- --cache-workers int How many workers should run in parallel to download chunks (default 4)
- --cache-writes Cache file data on writes through the FS
- --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
- --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
- --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
- --chunker-remote string Remote to chunk/unchunk
- --combine-upstreams SpaceSepList Upstreams for combining
- --compress-level int GZIP compression level (-2 to 9) (default -1)
- --compress-mode string Compression mode (default "gzip")
- --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
- --compress-remote string Remote to compress
- -L, --copy-links Follow symlinks and copy the pointed to item
- --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
- --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
- --crypt-filename-encryption string How to encrypt the filenames (default "standard")
- --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
- --crypt-password string Password or pass phrase for encryption (obscured)
- --crypt-password2 string Password or pass phrase for salt (obscured)
- --crypt-remote string Remote to encrypt/decrypt
- --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
- --crypt-show-mapping For all files listed show how the names encrypt
- --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
- --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
- --drive-auth-owner-only Only consider files owned by the authenticated user
- --drive-auth-url string Auth server URL
- --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
- --drive-client-id string Google Application Client Id
- --drive-client-secret string OAuth Client Secret
- --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
- --drive-disable-http2 Disable drive using http2 (default true)
- --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
- --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
- --drive-formats string Deprecated: See export_formats
- --drive-impersonate string Impersonate this user when using a service account
- --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
- --drive-keep-revision-forever Keep new head revision of each file forever
- --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
- --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
- --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
- --drive-resource-key string Resource key for accessing a link-shared file
- --drive-root-folder-id string ID of the root folder
- --drive-scope string Scope that rclone should use when requesting access from drive
- --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
- --drive-service-account-credentials string Service Account Credentials JSON blob
- --drive-service-account-file string Service Account Credentials JSON file path
- --drive-shared-with-me Only show files that are shared with me
- --drive-size-as-quota Show sizes as storage quota usage, not actual size
- --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
- --drive-skip-dangling-shortcuts If set skip dangling shortcut files
- --drive-skip-gdocs Skip google documents in all listings
- --drive-skip-shortcuts If set skip shortcut files
- --drive-starred-only Only show files that are starred
- --drive-stop-on-download-limit Make download limit errors be fatal
- --drive-stop-on-upload-limit Make upload limit errors be fatal
- --drive-team-drive string ID of the Shared Drive (Team Drive)
- --drive-token string OAuth Access Token as a JSON blob
- --drive-token-url string Token server url
- --drive-trashed-only Only show files that are in the trash
- --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
- --drive-use-created-date Use file created date instead of modified date
- --drive-use-shared-date Use date file was shared instead of modified date
- --drive-use-trash Send files to the trash instead of deleting permanently (default true)
- --drive-v2-download-min-size SizeSuffix            If objects are greater, use drive v2 API to download (default off)
- --dropbox-auth-url string Auth server URL
- --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
- --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
- --dropbox-batch-size int Max number of files in upload batch
- --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
- --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
- --dropbox-client-id string OAuth Client Id
- --dropbox-client-secret string OAuth Client Secret
- --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
- --dropbox-impersonate string Impersonate this user when using a business account
- --dropbox-shared-files Instructs rclone to work on individual shared files
- --dropbox-shared-folders Instructs rclone to work on shared folders
- --dropbox-token string OAuth Access Token as a JSON blob
- --dropbox-token-url string Token server url
- --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
- --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
- --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
- --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
- --fichier-shared-folder string If you want to download a shared folder, add this parameter
- --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --filefabric-permanent-token string Permanent Authentication Token
- --filefabric-root-folder-id string ID of the root folder
- --filefabric-token string Session Token
- --filefabric-token-expiry string Token expiry time
- --filefabric-url string URL of the Enterprise File Fabric to connect to
- --filefabric-version string Version read from the file fabric
- --ftp-ask-password Allow asking for FTP password when needed
- --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
- --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
- --ftp-disable-epsv Disable using EPSV even if server advertises support
- --ftp-disable-mlsd Disable using MLSD even if server advertises support
- --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
- --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
- --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
- --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
- --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
- --ftp-host string FTP host to connect to
- --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --ftp-no-check-certificate Do not verify the TLS certificate of the server
- --ftp-pass string FTP password (obscured)
- --ftp-port int FTP port number (default 21)
- --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
- --ftp-tls Use Implicit FTPS (FTP over TLS)
- --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
- --ftp-user string FTP username (default "$USER")
- --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
- --gcs-anonymous Access public buckets and objects without credentials
- --gcs-auth-url string Auth server URL
- --gcs-bucket-acl string Access Control List for new buckets
- --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
- --gcs-client-id string OAuth Client Id
- --gcs-client-secret string OAuth Client Secret
- --gcs-decompress If set this will decompress gzip encoded objects
- --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gcs-endpoint string Endpoint for the service
- --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
- --gcs-location string Location for the newly created buckets
- --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --gcs-object-acl string Access Control List for new objects
- --gcs-project-number string Project number
- --gcs-service-account-file string Service Account Credentials JSON file path
- --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
- --gcs-token string OAuth Access Token as a JSON blob
- --gcs-token-url string Token server url
- --gphotos-auth-url string Auth server URL
- --gphotos-client-id string OAuth Client Id
- --gphotos-client-secret string OAuth Client Secret
- --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gphotos-include-archived Also view and download archived media
- --gphotos-read-only Set to make the Google Photos backend read only
- --gphotos-read-size Set to read the size of media items
- --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
- --gphotos-token string OAuth Access Token as a JSON blob
- --gphotos-token-url string Token server url
- --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
- --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
- --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
- --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
- --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
- --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
- --hdfs-namenode string Hadoop name node and port
- --hdfs-service-principal-name string Kerberos service principal name for the namenode
- --hdfs-username string Hadoop user name
- --hidrive-auth-url string Auth server URL
- --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
- --hidrive-client-id string OAuth Client Id
- --hidrive-client-secret string OAuth Client Secret
- --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
- --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
- --hidrive-root-prefix string The root/parent folder for all paths (default "/")
- --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
- --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
- --hidrive-token string OAuth Access Token as a JSON blob
- --hidrive-token-url string Token server url
- --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
- --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
- --http-headers CommaSepList Set HTTP headers for all transactions
- --http-no-head Don't use HEAD requests
- --http-no-slash Set this if the site doesn't end directories with /
- --http-url string URL of HTTP host to connect to
- --internetarchive-access-key-id string IAS3 Access Key
- --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
- --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
- --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
- --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
- --internetarchive-secret-access-key string IAS3 Secret Key (password)
- --internetarchive-wait-archive Duration            Timeout for waiting for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
- --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
- --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
- --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
- --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
- --jottacloud-trashed-only Only show files that are in the trash
- --jottacloud-upload-resume-limit SizeSuffix        Files bigger than this can be resumed if the upload fails (default 10Mi)
- --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --koofr-endpoint string The Koofr API endpoint to use
- --koofr-mountid string Mount ID of the mount to use
- --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
- --koofr-provider string Choose your storage provider
- --koofr-setmtime Does the backend support setting modification time (default true)
- --koofr-user string Your user name
- -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
- --local-case-insensitive Force the filesystem to report itself as case insensitive
- --local-case-sensitive Force the filesystem to report itself as case sensitive
- --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --local-no-check-updated Don't check to see if the files change during upload
- --local-no-preallocate Disable preallocation of disk space for transferred files
- --local-no-set-modtime Disable setting modtime
- --local-no-sparse Disable sparse files for multi-thread downloads
- --local-nounc Disable UNC (long path names) conversion on Windows
- --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
- --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
- --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
- --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --mailru-pass string Password (obscured)
- --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
- --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
- --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
- --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
- --mailru-user string User name (usually email)
- --mega-debug Output more debug from Mega
- --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --mega-hard-delete Delete files permanently rather than putting them into the trash
- --mega-pass string Password (obscured)
- --mega-use-https Use HTTPS for transfers
- --mega-user string User name
- --netstorage-account string Set the NetStorage account name
- --netstorage-host string Domain+path of NetStorage host to connect to
- --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
- --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
- -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
- --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
- --onedrive-auth-url string Auth server URL
- --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
- --onedrive-client-id string OAuth Client Id
- --onedrive-client-secret string OAuth Client Secret
- --onedrive-drive-id string The ID of the drive to use
- --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
- --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
- --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
- --onedrive-link-password string Set the password for links created by the link command
- --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
- --onedrive-link-type string Set the type of the links created by the link command (default "view")
- --onedrive-list-chunk int Size of listing chunk (default 1000)
- --onedrive-no-versions Remove all versions on modifying operations
- --onedrive-region string Choose national cloud region for OneDrive (default "global")
- --onedrive-root-folder-id string ID of the root folder
- --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
- --onedrive-token string OAuth Access Token as a JSON blob
- --onedrive-token-url string Token server url
- --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --oos-compartment string Object storage compartment OCID
- --oos-config-file string Path to OCI config file (default "~/.oci/config")
- --oos-config-profile string Profile name inside the oci config file (default "Default")
- --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --oos-copy-timeout Duration Timeout for copy (default 1m0s)
- --oos-disable-checksum Don't store MD5 checksum with object metadata
- --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --oos-endpoint string Endpoint for Object storage API
- --oos-leave-parts-on-error                         If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery
- --oos-namespace string Object storage namespace
- --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --oos-provider string Choose your Auth Provider (default "env_auth")
- --oos-region string Object storage Region
- --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
- --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- --oos-sse-customer-key-sha256 string               If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
- --oos-sse-kms-key-id string                        If using your own master key in vault, this header specifies the
- --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
- --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
- --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
- --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
- --opendrive-password string Password (obscured)
- --opendrive-username string Username
- --pcloud-auth-url string Auth server URL
- --pcloud-client-id string OAuth Client Id
- --pcloud-client-secret string OAuth Client Secret
- --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
- --pcloud-password string Your pcloud password (obscured)
- --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
- --pcloud-token string OAuth Access Token as a JSON blob
- --pcloud-token-url string Token server url
- --pcloud-username string Your pcloud username
- --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --qingstor-access-key-id string QingStor Access Key ID
- --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
- --qingstor-connection-retries int Number of connection retries (default 3)
- --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
- --qingstor-endpoint string                         Enter an endpoint URL to connect to the QingStor API
- --qingstor-env-auth Get QingStor credentials from runtime
- --qingstor-secret-access-key string QingStor Secret Access Key (password)
- --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
- --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --qingstor-zone string Zone to connect to
- --s3-access-key-id string AWS Access Key ID
- --s3-acl string Canned ACL used when creating buckets and storing or copying objects
- --s3-bucket-acl string Canned ACL used when creating buckets
- --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --s3-decompress If set this will decompress gzip encoded objects
- --s3-disable-checksum Don't store MD5 checksum with object metadata
- --s3-disable-http2 Disable usage of http2 for S3 backends
- --s3-download-url string Custom endpoint for downloads
- --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --s3-endpoint string Endpoint for S3 API
- --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
- --s3-force-path-style                              If true use path style access; if false use virtual hosted style (default true)
- --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
- --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
- --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
- --s3-location-constraint string Location constraint - must be set to match the Region
- --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
- --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
- --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --s3-no-head If set, don't HEAD uploaded objects to check integrity
- --s3-no-head-object If set, do not do HEAD before GET when getting objects
- --s3-no-system-metadata Suppress setting and reading of system metadata
- --s3-profile string Profile to use in the shared credentials file
- --s3-provider string Choose your S3 provider
- --s3-region string Region to connect to
- --s3-requester-pays Enables requester pays option when interacting with S3 bucket
- --s3-secret-access-key string AWS Secret Access Key (password)
- --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
- --s3-session-token string An AWS session token
- --s3-shared-credentials-file string Path to the shared credentials file
- --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
- --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
- --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
- --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
- --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
- --s3-storage-class string The storage class to use when storing new objects in S3
- --s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
- --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
- --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
- --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
- --s3-v2-auth If true use v2 authentication
- --s3-version-at Time Show file versions as they were at the specified time (default off)
- --s3-versions Include old versions in directory listings
- --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
- --seafile-create-library Should rclone create a library if it doesn't exist
- --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
- --seafile-library string Name of the library
- --seafile-library-key string Library password (for encrypted libraries only) (obscured)
- --seafile-pass string Password (obscured)
- --seafile-url string URL of seafile host to connect to
- --seafile-user string User name (usually email address)
- --sftp-ask-password Allow asking for SFTP password when needed
- --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
- --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
- --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
- --sftp-disable-concurrent-reads If set don't use concurrent reads
- --sftp-disable-concurrent-writes If set don't use concurrent writes
- --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
- --sftp-host string SSH host to connect to
- --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
- --sftp-key-file string Path to PEM-encoded private key file
- --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
- --sftp-key-pem string Raw PEM-encoded private key
- --sftp-key-use-agent When set forces the usage of the ssh-agent
- --sftp-known-hosts-file string Optional path to known_hosts file
- --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
- --sftp-md5sum-command string The command used to read md5 hashes
- --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
- --sftp-path-override string Override path used by SSH shell commands
- --sftp-port int SSH port number (default 22)
- --sftp-pubkey-file string Optional path to public key file
- --sftp-server-command string Specifies the path or command to run a sftp server on the remote host
- --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
- --sftp-set-modtime Set the modified time on the remote if set (default true)
- --sftp-sha1sum-command string The command used to read sha1 hashes
- --sftp-shell-type string The type of SSH shell on remote server, if any
- --sftp-skip-links Set to skip any symlinks and any other non regular files
- --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
- --sftp-use-fstat If set use fstat instead of stat
- --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
- --sftp-user string SSH username (default "$USER")
- --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
- --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --sharefile-endpoint string Endpoint for API calls
- --sharefile-root-folder-id string ID of the root folder
- --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
- --sia-api-password string Sia Daemon API Password (obscured)
- --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
- --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
- --sia-user-agent string Siad User Agent (default "Sia-Agent")
- --skip-links Don't warn about skipped symlinks
- --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
- --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
- --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
- --smb-host string SMB server hostname to connect to
- --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --smb-pass string SMB password (obscured)
- --smb-port int SMB port number (default 445)
- --smb-spn string Service principal name
- --smb-user string SMB username (default "$USER")
- --storj-access-grant string Access grant
- --storj-api-key string API key
- --storj-passphrase string Encryption passphrase
- --storj-provider string Choose an authentication method (default "existing")
- --storj-satellite-address string Satellite address (default "us1.storj.io")
- --sugarsync-access-key-id string Sugarsync Access Key ID
- --sugarsync-app-id string Sugarsync App ID
- --sugarsync-authorization string Sugarsync authorization
- --sugarsync-authorization-expiry string Sugarsync authorization expiry
- --sugarsync-deleted-id string Sugarsync deleted folder id
- --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
- --sugarsync-hard-delete Permanently delete files if true
- --sugarsync-private-access-key string Sugarsync Private Access Key
- --sugarsync-refresh-token string Sugarsync refresh token
- --sugarsync-root-id string Sugarsync root id
- --sugarsync-user string Sugarsync user
- --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
- --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
- --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
- --swift-auth string Authentication URL for server (OS_AUTH_URL)
- --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
- --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
- --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
- --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
- --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
- --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
- --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
- --swift-key string API key or password (OS_PASSWORD)
- --swift-leave-parts-on-error If true avoid calling abort upload on a failure
- --swift-no-chunk Don't chunk files during streaming upload
- --swift-no-large-objects Disable support for static and dynamic large objects
- --swift-region string Region name - optional (OS_REGION_NAME)
- --swift-storage-policy string The storage policy to use when creating a new container
- --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
- --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
- --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
- --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
- --swift-user string User name to log in (OS_USERNAME)
- --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
- --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
- --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
- --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
- --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
- --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
- --union-upstreams string List of space separated upstreams
- --uptobox-access-token string Your access token
- --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
- --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
- --webdav-bearer-token-command string Command to run to get a bearer token
- --webdav-encoding string The encoding for the backend
- --webdav-headers CommaSepList Set HTTP headers for all transactions
- --webdav-pass string Password (obscured)
- --webdav-url string URL of http host to connect to
- --webdav-user string User name
- --webdav-vendor string Name of the WebDAV site/service/software you are using
- --yandex-auth-url string Auth server URL
- --yandex-client-id string OAuth Client Id
- --yandex-client-secret string OAuth Client Secret
- --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --yandex-hard-delete Delete files permanently rather than putting them into the trash
- --yandex-token string OAuth Access Token as a JSON blob
- --yandex-token-url string Token server url
- --zoho-auth-url string Auth server URL
- --zoho-client-id string OAuth Client Id
- --zoho-client-secret string OAuth Client Secret
- --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
- --zoho-region string Zoho region to connect to
- --zoho-token string OAuth Access Token as a JSON blob
- --zoho-token-url string Token server url
+ --acd-auth-url string Auth server URL
+ --acd-client-id string OAuth Client Id
+ --acd-client-secret string OAuth Client Secret
+ --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
+ --acd-token string OAuth Access Token as a JSON blob
+ --acd-token-url string Token server url
+ --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-remote string Remote or path to alias
+ --azureblob-access-tier string Access tier of blob: hot, cool or archive
+ --azureblob-account string Azure Storage Account Name
+ --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
+ --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
+ --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
+ --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
+ --azureblob-client-id string The ID of the client in use
+ --azureblob-client-secret string One of the service principal's client secrets
+ --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --azureblob-disable-checksum Don't store MD5 checksum with object metadata
+ --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+ --azureblob-endpoint string Endpoint for the service
+ --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
+ --azureblob-key string Storage Account Shared Key
+ --azureblob-list-chunk int Size of blob list (default 5000)
+ --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
+ --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-no-check-container If set, don't attempt to check the container exists or create it
+ --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
+ --azureblob-password string The user's password (obscured)
+ --azureblob-public-access string Public access level of a container: blob or container
+ --azureblob-sas-url string SAS URL for container level access only
+ --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
+ --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
+ --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
+ --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
+ --azureblob-use-emulator Uses local storage emulator if provided as 'true'
+ --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
+ --azureblob-username string User name (usually an email address)
+ --b2-account string Account ID or Application Key ID
+ --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
+ --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-disable-checksum Disable checksums for large (> upload cutoff) files
+ --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-url string Custom endpoint for downloads
+ --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --b2-endpoint string Endpoint for the service
+ --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
+ --b2-key string Application Key
+ --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
+ --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --b2-version-at Time Show file versions as they were at the specified time (default off)
+ --b2-versions Include old versions in directory listings
+ --box-access-token string Box App Primary Access Token
+ --box-auth-url string Auth server URL
+ --box-box-config-file string Box App config.json location
+ --box-box-sub-type string (default "user")
+ --box-client-id string OAuth Client Id
+ --box-client-secret string OAuth Client Secret
+ --box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+ --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
+ --box-owned-by string Only show items owned by the login (email address) passed in
+ --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
+ --box-token string OAuth Access Token as a JSON blob
+ --box-token-url string Token server url
+ --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+ --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
+ --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
+ --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
+ --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
+ --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
+ --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
+ --cache-db-purge Clear all the cached data for this remote on start
+ --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
+ --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
+ --cache-plex-password string The password of the Plex user (obscured)
+ --cache-plex-url string The URL of the Plex server
+ --cache-plex-username string The username of the Plex user
+ --cache-read-retries int How many times to retry a read from a cache storage (default 10)
+ --cache-remote string Remote to cache
+ --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
+ --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
+ --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
+ --cache-workers int How many workers should run in parallel to download chunks (default 4)
+ --cache-writes Cache file data on writes through the FS
+ --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
+ --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
+ --chunker-remote string Remote to chunk/unchunk
+ --combine-upstreams SpaceSepList Upstreams for combining
+ --compress-level int GZIP compression level (-2 to 9) (default -1)
+ --compress-mode string Compression mode (default "gzip")
+ --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
+ --compress-remote string Remote to compress
+ -L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
+ --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
+ --crypt-filename-encryption string How to encrypt the filenames (default "standard")
+ --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
+ --crypt-pass-bad-blocks If set this will pass bad blocks through as all 0
+ --crypt-password string Password or pass phrase for encryption (obscured)
+ --crypt-password2 string Password or pass phrase for salt (obscured)
+ --crypt-remote string Remote to encrypt/decrypt
+ --crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
+ --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+ --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
+ --drive-auth-owner-only Only consider files owned by the authenticated user
+ --drive-auth-url string Auth server URL
+ --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
+ --drive-client-id string Google Application Client Id
+ --drive-client-secret string OAuth Client Secret
+ --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-disable-http2 Disable drive using http2 (default true)
+ --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
+ --drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
+ --drive-formats string Deprecated: See export_formats
+ --drive-impersonate string Impersonate this user when using a service account
+ --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
+ --drive-keep-revision-forever Keep new head revision of each file forever
+ --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
+ --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
+ --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
+ --drive-resource-key string Resource key for accessing a link-shared file
+ --drive-root-folder-id string ID of the root folder
+ --drive-scope string Scope that rclone should use when requesting access from drive
+ --drive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --drive-service-account-credentials string Service Account Credentials JSON blob
+ --drive-service-account-file string Service Account Credentials JSON file path
+ --drive-shared-with-me Only show files that are shared with me
+ --drive-size-as-quota Show sizes as storage quota usage, not actual size
+ --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
+ --drive-skip-dangling-shortcuts If set skip dangling shortcut files
+ --drive-skip-gdocs Skip google documents in all listings
+ --drive-skip-shortcuts If set skip shortcut files
+ --drive-starred-only Only show files that are starred
+ --drive-stop-on-download-limit Make download limit errors be fatal
+ --drive-stop-on-upload-limit Make upload limit errors be fatal
+ --drive-team-drive string ID of the Shared Drive (Team Drive)
+ --drive-token string OAuth Access Token as a JSON blob
+ --drive-token-url string Token server url
+ --drive-trashed-only Only show files that are in the trash
+ --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
+ --drive-use-created-date Use file created date instead of modified date
+ --drive-use-shared-date Use date file was shared instead of modified date
+ --drive-use-trash Send files to the trash instead of deleting permanently (default true)
+ --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download (default off)
+ --dropbox-auth-url string Auth server URL
+ --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
+ --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
+ --dropbox-batch-size int Max number of files in upload batch
+ --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
+ --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
+ --dropbox-client-id string OAuth Client Id
+ --dropbox-client-secret string OAuth Client Secret
+ --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+ --dropbox-impersonate string Impersonate this user when using a business account
+ --dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --dropbox-shared-files Instructs rclone to work on individual shared files
+ --dropbox-shared-folders Instructs rclone to work on shared folders
+ --dropbox-token string OAuth Access Token as a JSON blob
+ --dropbox-token-url string Token server url
+ --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
+ --fichier-cdn Set if you wish to use CDN download links
+ --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+ --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
+ --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
+ --fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --filefabric-permanent-token string Permanent Authentication Token
+ --filefabric-root-folder-id string ID of the root folder
+ --filefabric-token string Session Token
+ --filefabric-token-expiry string Token expiry time
+ --filefabric-url string URL of the Enterprise File Fabric to connect to
+ --filefabric-version string Version read from the file fabric
+ --ftp-ask-password Allow asking for FTP password when needed
+ --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
+ --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-disable-epsv Disable using EPSV even if server advertises support
+ --ftp-disable-mlsd Disable using MLSD even if server advertises support
+ --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
+ --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
+ --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
+ --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
+ --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
+ --ftp-host string FTP host to connect to
+ --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --ftp-no-check-certificate Do not verify the TLS certificate of the server
+ --ftp-pass string FTP password (obscured)
+ --ftp-port int FTP port number (default 21)
+ --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
+ --ftp-tls Use Implicit FTPS (FTP over TLS)
+ --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
+ --ftp-user string FTP username (default "$USER")
+ --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
+ --gcs-anonymous Access public buckets and objects without credentials
+ --gcs-auth-url string Auth server URL
+ --gcs-bucket-acl string Access Control List for new buckets
+ --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
+ --gcs-client-id string OAuth Client Id
+ --gcs-client-secret string OAuth Client Secret
+ --gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gcs-endpoint string Endpoint for the service
+ --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --gcs-location string Location for the newly created buckets
+ --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --gcs-object-acl string Access Control List for new objects
+ --gcs-project-number string Project number
+ --gcs-service-account-file string Service Account Credentials JSON file path
+ --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
+ --gcs-token string OAuth Access Token as a JSON blob
+ --gcs-token-url string Token server url
+ --gcs-user-project string User project
+ --gphotos-auth-url string Auth server URL
+ --gphotos-client-id string OAuth Client Id
+ --gphotos-client-secret string OAuth Client Secret
+ --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gphotos-include-archived Also view and download archived media
+ --gphotos-read-only Set to make the Google Photos backend read only
+ --gphotos-read-size Set to read the size of media items
+ --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
+ --gphotos-token string OAuth Access Token as a JSON blob
+ --gphotos-token-url string Token server url
+ --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
+ --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+ --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
+ --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+ --hdfs-namenode string Hadoop name node and port
+ --hdfs-service-principal-name string Kerberos service principal name for the namenode
+ --hdfs-username string Hadoop user name
+ --hidrive-auth-url string Auth server URL
+ --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
+ --hidrive-client-id string OAuth Client Id
+ --hidrive-client-secret string OAuth Client Secret
+ --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
+ --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
+ --hidrive-root-prefix string The root/parent folder for all paths (default "/")
+ --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
+ --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
+ --hidrive-token string OAuth Access Token as a JSON blob
+ --hidrive-token-url string Token server url
+ --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
+ --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-headers CommaSepList Set HTTP headers for all transactions
+ --http-no-head Don't use HEAD requests
+ --http-no-slash Set this if the site doesn't end directories with /
+ --http-url string URL of HTTP host to connect to
+ --internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
+ --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
+ --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
+ --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
+ --internetarchive-secret-access-key string IAS3 Secret Key (password)
+ --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s)
+ --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+ --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
+ --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+ --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
+ --jottacloud-trashed-only Only show files that are in the trash
+ --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --koofr-endpoint string The Koofr API endpoint to use
+ --koofr-mountid string Mount ID of the mount to use
+ --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
+ --koofr-provider string Choose your storage provider
+ --koofr-setmtime Does the backend support setting modification time (default true)
+ --koofr-user string Your user name
+ -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
+ --local-case-insensitive Force the filesystem to report itself as case insensitive
+ --local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --local-no-check-updated Don't check to see if the files change during upload
+ --local-no-preallocate Disable preallocation of disk space for transferred files
+ --local-no-set-modtime Disable setting modtime
+ --local-no-sparse Disable sparse files for multi-thread downloads
+ --local-nounc Disable UNC (long path names) conversion on Windows
+ --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
+ --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
+ --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
+ --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --mailru-pass string Password (obscured)
+ --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
+ --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
+ --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
+ --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
+ --mailru-user string User name (usually email)
+ --mega-debug Output more debug from Mega
+ --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --mega-hard-delete Delete files permanently rather than putting them into the trash
+ --mega-pass string Password (obscured)
+ --mega-use-https Use HTTPS for transfers
+ --mega-user string User name
+ --netstorage-account string Set the NetStorage account name
+ --netstorage-host string Domain+path of NetStorage host to connect to
+ --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
+ --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
+ -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
+ --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
+ --onedrive-auth-url string Auth server URL
+ --onedrive-av-override Allows download of files the server thinks have a virus
+ --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
+ --onedrive-client-id string OAuth Client Id
+ --onedrive-client-secret string OAuth Client Secret
+ --onedrive-drive-id string The ID of the drive to use
+ --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
+ --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
+ --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
+ --onedrive-link-password string Set the password for links created by the link command
+ --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
+ --onedrive-link-type string Set the type of the links created by the link command (default "view")
+ --onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-no-versions Remove all versions on modifying operations
+ --onedrive-region string Choose national cloud region for OneDrive (default "global")
+ --onedrive-root-folder-id string ID of the root folder
+ --onedrive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --onedrive-token string OAuth Access Token as a JSON blob
+ --onedrive-token-url string Token server url
+ --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --oos-compartment string Object storage compartment OCID
+ --oos-config-file string Path to OCI config file (default "~/.oci/config")
+ --oos-config-profile string Profile name inside the oci config file (default "Default")
+ --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-disable-checksum Don't store MD5 checksum with object metadata
+ --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --oos-endpoint string Endpoint for Object storage API
+ --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --oos-namespace string Object storage namespace
+ --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --oos-provider string Choose your Auth Provider (default "env_auth")
+ --oos-region string Object storage Region
+ --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
+ --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
+ --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
+ --oos-sse-customer-key-sha256 string If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
+ --oos-sse-kms-key-id string if using your own master key in vault, this header specifies the
+ --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
+ --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
+ --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+ --opendrive-password string Password (obscured)
+ --opendrive-username string Username
+ --pcloud-auth-url string Auth server URL
+ --pcloud-client-id string OAuth Client Id
+ --pcloud-client-secret string OAuth Client Secret
+ --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
+ --pcloud-password string Your pcloud password (obscured)
+ --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
+ --pcloud-token string OAuth Access Token as a JSON blob
+ --pcloud-token-url string Token server url
+ --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
+ --pikpak-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
+ --pikpak-pass string Pikpak password (obscured)
+ --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
+ --pikpak-trashed-only Only show files that are in the trash
+ --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
+ --pikpak-user string Pikpak username
+ --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --qingstor-access-key-id string QingStor Access Key ID
+ --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
+ --qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
+ --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
+ --qingstor-env-auth Get QingStor credentials from runtime
+ --qingstor-secret-access-key string QingStor Secret Access Key (password)
+ --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
+ --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --qingstor-zone string Zone to connect to
+ --s3-access-key-id string AWS Access Key ID
+ --s3-acl string Canned ACL used when creating buckets and storing or copying objects
+ --s3-bucket-acl string Canned ACL used when creating buckets
+ --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --s3-decompress If set this will decompress gzip encoded objects
+ --s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --s3-disable-checksum Don't store MD5 checksum with object metadata
+ --s3-disable-http2 Disable usage of http2 for S3 backends
+ --s3-download-url string Custom endpoint for downloads
+ --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --s3-endpoint string Endpoint for S3 API
+ --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
+ --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
+ --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+ --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
+ --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
+ --s3-location-constraint string Location constraint - must be set to match the Region
+ --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
+ --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
+ --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --s3-no-head If set, don't HEAD uploaded objects to check integrity
+ --s3-no-head-object If set, do not do HEAD before GET when getting objects
+ --s3-no-system-metadata Suppress setting and reading of system metadata
+ --s3-profile string Profile to use in the shared credentials file
+ --s3-provider string Choose your S3 provider
+ --s3-region string Region to connect to
+ --s3-requester-pays Enables requester pays option when interacting with S3 bucket
+ --s3-secret-access-key string AWS Secret Access Key (password)
+ --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
+ --s3-session-token string An AWS session token
+ --s3-shared-credentials-file string Path to the shared credentials file
+ --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+ --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
+ --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
+ --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+ --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
+ --s3-storage-class string The storage class to use when storing new objects in S3
+ --s3-sts-endpoint string Endpoint for STS
+ --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
+ --s3-use-accept-encoding-gzip Tristate Whether to send Accept-Encoding: gzip header (default unset)
+ --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
+ --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
+ --s3-v2-auth If true use v2 authentication
+ --s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-versions Include old versions in directory listings
+ --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
+ --seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+ --seafile-library string Name of the library
+ --seafile-library-key string Library password (for encrypted libraries only) (obscured)
+ --seafile-pass string Password (obscured)
+ --seafile-url string URL of seafile host to connect to
+ --seafile-user string User name (usually email address)
+ --sftp-ask-password Allow asking for SFTP password when needed
+ --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
+ --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
+ --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
+ --sftp-disable-concurrent-reads If set don't use concurrent reads
+ --sftp-disable-concurrent-writes If set don't use concurrent writes
+ --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
+ --sftp-host string SSH host to connect to
+ --sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
+ --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
+ --sftp-key-file string Path to PEM-encoded private key file
+ --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
+ --sftp-key-pem string Raw PEM-encoded private key
+ --sftp-key-use-agent When set forces the usage of the ssh-agent
+ --sftp-known-hosts-file string Optional path to known_hosts file
+ --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
+ --sftp-md5sum-command string The command used to read md5 hashes
+ --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
+ --sftp-path-override string Override path used by SSH shell commands
+ --sftp-port int SSH port number (default 22)
+ --sftp-pubkey-file string Optional path to public key file
+ --sftp-server-command string Specifies the path or command to run a sftp server on the remote host
+ --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
+ --sftp-set-modtime Set the modified time on the remote if set (default true)
+ --sftp-sha1sum-command string The command used to read sha1 hashes
+ --sftp-shell-type string The type of SSH shell on remote server, if any
+ --sftp-skip-links Set to skip any symlinks and any other non regular files
+ --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
+ --sftp-use-fstat If set use fstat instead of stat
+ --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
+ --sftp-user string SSH username (default "$USER")
+ --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
+ --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --sharefile-endpoint string Endpoint for API calls
+ --sharefile-root-folder-id string ID of the root folder
+ --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
+ --sia-api-password string Sia Daemon API Password (obscured)
+ --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
+ --sia-user-agent string Siad User Agent (default "Sia-Agent")
+ --skip-links Don't warn about skipped symlinks
+ --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
+ --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
+ --smb-host string SMB server hostname to connect to
+ --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --smb-pass string SMB password (obscured)
+ --smb-port int SMB port number (default 445)
+ --smb-spn string Service principal name
+ --smb-user string SMB username (default "$USER")
+ --storj-access-grant string Access grant
+ --storj-api-key string API key
+ --storj-passphrase string Encryption passphrase
+ --storj-provider string Choose an authentication method (default "existing")
+ --storj-satellite-address string Satellite address (default "us1.storj.io")
+ --sugarsync-access-key-id string Sugarsync Access Key ID
+ --sugarsync-app-id string Sugarsync App ID
+ --sugarsync-authorization string Sugarsync authorization
+ --sugarsync-authorization-expiry string Sugarsync authorization expiry
+ --sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
+ --sugarsync-hard-delete Permanently delete files if true
+ --sugarsync-private-access-key string Sugarsync Private Access Key
+ --sugarsync-refresh-token string Sugarsync refresh token
+ --sugarsync-root-id string Sugarsync root id
+ --sugarsync-user string Sugarsync user
+ --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
+ --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
+ --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
+ --swift-auth string Authentication URL for server (OS_AUTH_URL)
+ --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
+ --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
+ --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
+ --swift-key string API key or password (OS_PASSWORD)
+ --swift-leave-parts-on-error If true avoid calling abort upload on a failure
+ --swift-no-chunk Don't chunk files during streaming upload
+ --swift-no-large-objects Disable support for static and dynamic large objects
+ --swift-region string Region name - optional (OS_REGION_NAME)
+ --swift-storage-policy string The storage policy to use when creating a new container
+ --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
+ --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ --swift-user string User name to log in (OS_USERNAME)
+ --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+ --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
+ --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
+ --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
+ --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
+ --union-upstreams string List of space separated upstreams
+ --uptobox-access-token string Your access token
+ --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+ --uptobox-private Set to make uploaded files private
+ --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
+ --webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-encoding string The encoding for the backend
+ --webdav-headers CommaSepList Set HTTP headers for all transactions
+ --webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --webdav-pass string Password (obscured)
+ --webdav-url string URL of http host to connect to
+ --webdav-user string User name
+ --webdav-vendor string Name of the WebDAV site/service/software you are using
+ --yandex-auth-url string Auth server URL
+ --yandex-client-id string OAuth Client Id
+ --yandex-client-secret string OAuth Client Secret
+ --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-token string OAuth Access Token as a JSON blob
+ --yandex-token-url string Token server url
+ --zoho-auth-url string Auth server URL
+ --zoho-client-id string OAuth Client Id
+ --zoho-client-secret string OAuth Client Secret
+ --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
+ --zoho-region string Zoho region to connect to
+ --zoho-token string OAuth Access Token as a JSON blob
+ --zoho-token-url string Token server url
```
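Each of these backend flags can also be set with an environment variable: prefix the long flag name with `RCLONE_`, convert it to upper case and replace `-` with `_`, so `--s3-chunk-size` becomes `RCLONE_S3_CHUNK_SIZE`. A minimal sketch (assuming a remote named `remote:` is already configured):

    # equivalent ways to raise the S3 upload chunk size for one run
    rclone copy /data remote:bucket --s3-chunk-size 16Mi
    RCLONE_S3_CHUNK_SIZE=16Mi rclone copy /data remote:bucket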
# Docker Volume Plugin
@@ -16648,7 +16929,7 @@ quashed by adding `--quiet` to the bisync command line.
# NOTICE: If you make changes to this file you MUST do a --resync run.
# Run with --dry-run to see what changes will be made.
-# Dropbox wont sync some files so filter them away here.
+# Dropbox won't sync some files so filter them away here.
# See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
- .dropbox.attr
- ~*.tmp
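# For example (a sketch, with hypothetical paths), a resync run could look like:
#   rclone bisync /path/to/local remote:path --filters-file /path/to/filters.txt --resync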
@@ -17060,7 +17341,7 @@ Your normal workflow might be as follows:
Delete a single file.
- `delete-glob <dir> <pattern>`
Delete a group of files located one level deep in the given directory
- with names maching a given glob pattern.
+ with names matching a given glob pattern.
- `touch-glob YYYY-MM-DD <dir> <pattern>`
Change modification time on a group of files.
- `touch-copy YYYY-MM-DD `
@@ -17322,6 +17603,17 @@ Properties:
- Type: string
- Required: false
+#### --fichier-cdn
+
+Set if you wish to use CDN download links.
+
+Properties:
+
+- Config: cdn
+- Env Var: RCLONE_FICHIER_CDN
+- Type: bool
+- Default: false
+
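+A brief sketch of enabling this, either on the command line or in the
+config file (assuming a remote named `fichier:`, which is hypothetical):
+
+    rclone copy fichier:dir /local/dir --fichier-cdn
+
+or in `rclone.conf`:
+
+    [fichier]
+    type = fichier
+    cdn = true
+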
#### --fichier-encoding
The encoding for the backend.
@@ -17803,12 +18095,14 @@ The S3 backend can be used with a number of different providers:
- Arvan Cloud Object Storage (AOS)
- DigitalOcean Spaces
- Dreamhost
+- GCS
- Huawei OBS
- IBM COS S3
- IDrive e2
- IONOS Cloud
- Liara Object Storage
- Minio
+- Petabox
- Qiniu Cloud Object Storage (Kodo)
- RackCorp Object Storage
- Scaleway
@@ -18165,7 +18459,11 @@ However for objects which were uploaded as multipart uploads or with
server side encryption (SSE-AWS or SSE-C) the `ETag` header is no
longer the MD5 sum of the data, so rclone adds an additional piece of
metadata `X-Amz-Meta-Md5chksum` which is a base64 encoded MD5 hash (in
-the same format as is required for `Content-MD5`).
+the same format as is required for `Content-MD5`). You can use `base64 -d` and `hexdump` to check this value manually:
+
+ echo 'VWTGdNx3LyXQDfA0e2Edxw==' | base64 -d | hexdump
+
+or you can use `rclone check` to verify the hashes are OK.
For large objects, calculating this hash can take some time so the
addition of this hash can be disabled with `--s3-disable-checksum`.
@@ -18329,7 +18627,7 @@ The different authentication methods are tried in this order:
- Access Key ID: `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`
- Secret Access Key: `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`
- Session Token: `AWS_SESSION_TOKEN` (optional)
- - Or, use a [named profile](https://docs.aws.amazon.com/cli/latest/userguide/cli-multiple-profiles.html):
+ - Or, use a [named profile](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html):
- Profile files are standard files used by AWS CLI tools
- By default it will use the profile in your home directory (e.g. `~/.aws/credentials` on unix based systems) file and the "default" profile, to change set these environment variables:
- `AWS_SHARED_CREDENTIALS_FILE` to control which file.
@@ -18429,7 +18727,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t
### Standard options
-Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
#### --s3-provider
@@ -18446,18 +18744,20 @@ Properties:
- Amazon Web Services (AWS) S3
- "Alibaba"
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
+ - "ArvanCloud"
+ - Arvan Cloud Object Storage (AOS)
- "Ceph"
- Ceph Object Storage
- "ChinaMobile"
- China Mobile Ecloud Elastic Object Storage (EOS)
- "Cloudflare"
- Cloudflare R2 Storage
- - "ArvanCloud"
- - Arvan Cloud Object Storage (AOS)
- "DigitalOcean"
- DigitalOcean Spaces
- "Dreamhost"
- Dreamhost DreamObjects
+ - "GCS"
+ - Google Cloud Storage
- "HuaweiOBS"
- Huawei Object Storage Service
- "IBMCOS"
@@ -18474,6 +18774,8 @@ Properties:
- Minio Object Storage
- "Netease"
- Netease Object Storage (NOS)
+ - "Petabox"
+ - Petabox Object Storage
- "RackCorp"
- RackCorp Object Storage
- "Scaleway"
@@ -18813,6 +19115,30 @@ Properties:
#### --s3-region
+Region where your bucket will be created and your data stored.
+
+
+Properties:
+
+- Config: region
+- Env Var: RCLONE_S3_REGION
+- Provider: Petabox
+- Type: string
+- Required: false
+- Examples:
+ - "us-east-1"
+ - US East (N. Virginia)
+ - "eu-central-1"
+ - Europe (Frankfurt)
+ - "ap-southeast-1"
+ - Asia Pacific (Singapore)
+ - "me-south-1"
+ - Middle East (Bahrain)
+ - "sa-east-1"
+ - South America (São Paulo)
+
+#### --s3-region
+
Region to connect to.
Leave blank if you are using an S3 clone and you don't have a region.
@@ -18821,7 +19147,7 @@ Properties:
- Config: region
- Env Var: RCLONE_S3_REGION
-- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
+- Provider: !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
- Type: string
- Required: false
- Examples:
@@ -18932,10 +19258,10 @@ Properties:
- Type: string
- Required: false
- Examples:
- - "s3.ir-thr-at1.arvanstorage.com"
+ - "s3.ir-thr-at1.arvanstorage.ir"
- The default endpoint - a good choice if you are unsure.
- - Tehran Iran (Asiatech)
- - "s3.ir-tbz-sh1.arvanstorage.com"
+ - Tehran Iran (Simin)
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
- Tabriz Iran (Shahriar)
#### --s3-endpoint
@@ -19100,6 +19426,33 @@ Properties:
#### --s3-endpoint
+Endpoint for Petabox S3 Object Storage.
+
+Specify the endpoint from the same region.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: Petabox
+- Type: string
+- Required: true
+- Examples:
+ - "s3.petabox.io"
+ - US East (N. Virginia)
+ - "s3.us-east-1.petabox.io"
+ - US East (N. Virginia)
+ - "s3.eu-central-1.petabox.io"
+ - Europe (Frankfurt)
+ - "s3.ap-southeast-1.petabox.io"
+ - Asia Pacific (Singapore)
+ - "s3.me-south-1.petabox.io"
+ - Middle East (Bahrain)
+ - "s3.sa-east-1.petabox.io"
+ - South America (São Paulo)
+
+#### --s3-endpoint
+
Endpoint for Liara Object Storage API.
Properties:
@@ -19260,6 +19613,21 @@ Properties:
#### --s3-endpoint
+Endpoint for Google Cloud Storage.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: GCS
+- Type: string
+- Required: false
+- Examples:
+ - "https://storage.googleapis.com"
+ - Google Cloud Storage endpoint
+
+#### --s3-endpoint
+
Endpoint for Storj Gateway.
Properties:
@@ -19412,7 +19780,7 @@ Properties:
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
-- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
+- Provider: !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
- Type: string
- Required: false
- Examples:
@@ -19466,8 +19834,10 @@ Properties:
- Wasabi AP Southeast 2 (Sydney)
- "storage.iran.liara.space"
- Liara Iran endpoint
- - "s3.ir-thr-at1.arvanstorage.com"
- - ArvanCloud Tehran Iran (Asiatech) endpoint
+ - "s3.ir-thr-at1.arvanstorage.ir"
+ - ArvanCloud Tehran Iran (Simin) endpoint
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
+ - ArvanCloud Tabriz Iran (Shahriar) endpoint
#### --s3-location-constraint
@@ -19624,7 +19994,7 @@ Properties:
- Required: false
- Examples:
- "ir-thr-at1"
- - Tehran Iran (Asiatech)
+ - Tehran Iran (Simin)
- "ir-tbz-sh1"
- Tabriz Iran (Shahriar)
@@ -19798,7 +20168,7 @@ Properties:
- Config: location_constraint
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
-- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
+- Provider: !AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox
- Type: string
- Required: false
@@ -20043,9 +20413,15 @@ Properties:
- "STANDARD"
- The Standard class for any upload.
- Suitable for on-demand content like streaming or CDN.
+ - Available in all regions.
- "GLACIER"
- Archived storage.
- Prices are lower, but it needs to be restored first to be accessed.
+ - Available in FR-PAR and NL-AMS regions.
+ - "ONEZONE_IA"
+ - One Zone - Infrequent Access.
+ - A good choice for storing secondary backup copies or easily re-creatable data.
+ - Available in the FR-PAR region only.
#### --s3-storage-class
@@ -20070,7 +20446,7 @@ Properties:
### Advanced options
-Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
#### --s3-bucket-acl
@@ -20623,6 +20999,21 @@ Properties:
- Type: string
- Required: false
+#### --s3-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option creates an
+empty object ending with "/" to persist the folder.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_S3_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
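+For example, with this option set, creating an empty directory persists
+it as a zero-length marker object (a sketch; the remote and bucket
+names are assumptions):
+
+ rclone mkdir --s3-directory-markers remote:bucket/empty-dir
+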
#### --s3-use-multipart-etag
Whether to use ETag in multipart uploads for verification
@@ -20738,6 +21129,30 @@ Properties:
- Type: Tristate
- Default: unset
+#### --s3-use-accept-encoding-gzip
+
+Whether to send `Accept-Encoding: gzip` header.
+
+By default, rclone will append `Accept-Encoding: gzip` to the request to download
+compressed objects whenever possible.
+
+However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
+the signature of the request.
+
+A symptom of this would be receiving errors like
+
+ SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
+
+In this case, you might want to try disabling this option.
+
+
+Properties:
+
+- Config: use_accept_encoding_gzip
+- Env Var: RCLONE_S3_USE_ACCEPT_ENCODING_GZIP
+- Type: Tristate
+- Default: unset
+
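+For example, if you hit `SignatureDoesNotMatch` errors with an
+S3-compatible provider, you could try (a sketch; `remote` is an assumed
+S3 remote):
+
+ rclone copy remote:bucket/path /tmp/data --s3-use-accept-encoding-gzip=false
+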
#### --s3-no-system-metadata
Suppress setting and reading of system metadata
@@ -21180,6 +21595,21 @@ server_side_encryption =
storage_class =
```
+### Google Cloud Storage
+
+[Google Cloud Storage](https://cloud.google.com/storage/docs) is an [S3-interoperable](https://cloud.google.com/storage/docs/interoperability) object storage service from Google Cloud Platform.
+
+To connect to Google Cloud Storage you will need an access key and secret key. These can be retrieved by creating an [HMAC key](https://cloud.google.com/storage/docs/authentication/managing-hmackeys).
+
+```
+[gs]
+type = s3
+provider = GCS
+access_key_id = your_access_key
+secret_access_key = your_secret_key
+endpoint = https://storage.googleapis.com
+```
+
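+With this in place you can use the remote as usual, for example listing
+your buckets (a usage sketch with the `gs` remote from above):
+
+ rclone lsd gs:
+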
### DigitalOcean Spaces
[Spaces](https://www.digitalocean.com/products/object-storage/) is an [S3-interoperable](https://developers.digitalocean.com/documentation/spaces/) object storage service from cloud provider DigitalOcean.
@@ -23128,6 +23558,166 @@ For Netease NOS configure as per the configurator `rclone config`
setting the provider `Netease`. This will automatically set
`force_path_style = false` which is necessary for it to run properly.
+### Petabox
+
+Here is an example of making a [Petabox](https://petabox.io/)
+configuration. First run:
+
+```bash
+rclone config
+```
+
+This will guide you through an interactive setup process.
+
+```
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+n/s> n
+
+Enter name for new remote.
+name> My Petabox Storage
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+[snip]
+Storage> s3
+
+Option provider.
+Choose your S3 provider.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+[snip]
+XX / Petabox Object Storage
+ \ (Petabox)
+[snip]
+provider> Petabox
+
+Option env_auth.
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+Only applies if access_key_id and secret_access_key is blank.
+Choose a number from below, or type in your own boolean value (true or false).
+Press Enter for the default (false).
+ 1 / Enter AWS credentials in the next step.
+ \ (false)
+ 2 / Get AWS credentials from the environment (env vars or IAM).
+ \ (true)
+env_auth> 1
+
+Option access_key_id.
+AWS Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+access_key_id> YOUR_ACCESS_KEY_ID
+
+Option secret_access_key.
+AWS Secret Access Key (password).
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+secret_access_key> YOUR_SECRET_ACCESS_KEY
+
+Option region.
+Region where your bucket will be created and your data stored.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / US East (N. Virginia)
+ \ (us-east-1)
+ 2 / Europe (Frankfurt)
+ \ (eu-central-1)
+ 3 / Asia Pacific (Singapore)
+ \ (ap-southeast-1)
+ 4 / Middle East (Bahrain)
+ \ (me-south-1)
+ 5 / South America (São Paulo)
+ \ (sa-east-1)
+region> 1
+
+Option endpoint.
+Endpoint for Petabox S3 Object Storage.
+Specify the endpoint from the same region.
+Choose a number from below, or type in your own value.
+ 1 / US East (N. Virginia)
+ \ (s3.petabox.io)
+ 2 / US East (N. Virginia)
+ \ (s3.us-east-1.petabox.io)
+ 3 / Europe (Frankfurt)
+ \ (s3.eu-central-1.petabox.io)
+ 4 / Asia Pacific (Singapore)
+ \ (s3.ap-southeast-1.petabox.io)
+ 5 / Middle East (Bahrain)
+ \ (s3.me-south-1.petabox.io)
+ 6 / South America (São Paulo)
+ \ (s3.sa-east-1.petabox.io)
+endpoint> 1
+
+Option acl.
+Canned ACL used when creating buckets and storing or copying objects.
+This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
+For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+Note that this ACL is applied when server-side copying objects as S3
+doesn't copy the ACL from the source but rather writes a fresh one.
+If the acl is an empty string then no X-Amz-Acl: header is added and
+the default (private) will be used.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ / Owner gets FULL_CONTROL.
+ 1 | No one else has access rights (default).
+ \ (private)
+ / Owner gets FULL_CONTROL.
+ 2 | The AllUsers group gets READ access.
+ \ (public-read)
+ / Owner gets FULL_CONTROL.
+ 3 | The AllUsers group gets READ and WRITE access.
+ | Granting this on a bucket is generally not recommended.
+ \ (public-read-write)
+ / Owner gets FULL_CONTROL.
+ 4 | The AuthenticatedUsers group gets READ access.
+ \ (authenticated-read)
+ / Object owner gets FULL_CONTROL.
+ 5 | Bucket owner gets READ access.
+ | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-read)
+ / Both the object owner and the bucket owner get FULL_CONTROL over the object.
+ 6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-full-control)
+acl> 1
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> No
+
+Configuration complete.
+Options:
+- type: s3
+- provider: Petabox
+- access_key_id: YOUR_ACCESS_KEY_ID
+- secret_access_key: YOUR_SECRET_ACCESS_KEY
+- region: us-east-1
+- endpoint: s3.petabox.io
+Keep this "My Petabox Storage" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+This will leave the config file looking like this.
+
+```
+[My Petabox Storage]
+type = s3
+provider = Petabox
+access_key_id = YOUR_ACCESS_KEY_ID
+secret_access_key = YOUR_SECRET_ACCESS_KEY
+region = us-east-1
+endpoint = s3.petabox.io
+```
+
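+Once configured you can use the remote like any other, for example (a
+usage sketch; the bucket name is an assumption):
+
+ rclone mkdir "My Petabox Storage:bucket"
+ rclone copy /path/to/files "My Petabox Storage:bucket"
+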
### Storj
Storj is a decentralized cloud storage which can be used through its
@@ -25182,7 +25772,7 @@ guarantee given hash for all files. If wrapped remote doesn't support it,
chunker will then add metadata to all files, even small. However, this can
double the amount of small files in storage and incur additional service charges.
You can even use chunker to force md5/sha1 support in any other remote
-at expense of sidecar meta objects by setting e.g. `chunk_type=sha1all`
+at expense of sidecar meta objects by setting e.g. `hash_type=sha1all`
to force hashsums and `chunk_size=1P` to effectively disable chunking.
Normally, when a file is copied to chunker controlled remote, chunker
@@ -26061,7 +26651,7 @@ address this problem to a certain degree.
For cloud storage systems with case sensitive file names (e.g. Google Drive),
`base64` can be used to reduce file name length.
For cloud storage systems using UTF-16 to store file names internally
-(e.g. OneDrive), `base32768` can be used to drastically reduce
+(e.g. OneDrive, Dropbox), `base32768` can be used to drastically reduce
file name length.
An alternative, future rclone file name encryption mode may tolerate
@@ -26096,7 +26686,7 @@ Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.
Use the `rclone cryptcheck` command to check the
-integrity of a crypted remote instead of `rclone check` which can't
+integrity of an encrypted remote instead of `rclone check` which can't
check the checksums properly.
@@ -26136,7 +26726,7 @@ Properties:
- Very simple filename obfuscation.
- "off"
- Don't encrypt the file names.
- - Adds a ".bin" extension only.
+ - Adds a ".bin" (or the configured "suffix") extension only.
#### --crypt-directory-name-encryption
@@ -26191,6 +26781,8 @@ Here are the Advanced options specific to crypt (Encrypt/Decrypt a remote).
#### --crypt-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
@@ -26244,6 +26836,21 @@ Properties:
- "false"
- Encrypt file data.
+#### --crypt-pass-bad-blocks
+
+If set this will pass bad blocks through as all 0.
+
+This should not be set in normal operation, it should only be set if
+trying to recover an encrypted file with errors and it is desired to
+recover as much of the file as possible.
+
+Properties:
+
+- Config: pass_bad_blocks
+- Env Var: RCLONE_CRYPT_PASS_BAD_BLOCKS
+- Type: bool
+- Default: false
+
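+For example, to salvage what remains of a damaged encrypted file (a
+recovery sketch; `secret` is an assumed crypt remote):
+
+ rclone copy --crypt-pass-bad-blocks secret:damaged/file /tmp/recovered
+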
#### --crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -26265,7 +26872,21 @@ Properties:
- Encode using base64. Suitable for case sensitive remote.
- "base32768"
- Encode using base32768. Suitable if your remote counts UTF-16 or
- - Unicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)
+ - Unicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)
+
+#### --crypt-suffix
+
+If this is set it will override the default suffix of ".bin".
+
+Setting suffix to "none" will result in an empty suffix. This may be useful
+when the path length is critical.
+
+Properties:
+
+- Config: suffix
+- Env Var: RCLONE_CRYPT_SUFFIX
+- Type: string
+- Default: ".bin"
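+
+For example, a crypt remote with filename encryption off and no suffix
+might look like this in the config file (a sketch; the section name and
+wrapped remote are assumptions):
+
+```
+[secret]
+type = crypt
+remote = remote:encrypted
+filename_encryption = off
+suffix = none
+```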
### Metadata
@@ -26322,9 +26943,9 @@ Usage Example:
-## Backing up a crypted remote
+## Backing up an encrypted remote
-If you wish to backup a crypted remote, it is recommended that you use
+If you wish to backup an encrypted remote, it is recommended that you use
`rclone sync` on the encrypted files, and make sure the passwords are
the same in the new encrypted remote.
@@ -27173,8 +27794,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
-- batch_mode: async - default batch_timeout is 500ms
-- batch_mode: sync - default batch_timeout is 10s
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
@@ -27196,6 +27817,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+#### --dropbox-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_DROPBOX_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
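+For example, to back off harder if you are being rate limited (a usage
+sketch; `dropbox` is an assumed remote name):
+
+ rclone copy dropbox:docs /tmp/docs --dropbox-pacer-min-sleep 100ms
+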
#### --dropbox-encoding
The encoding for the backend.
@@ -28334,6 +28966,19 @@ Properties:
- Type: string
- Required: false
+#### --gcs-user-project
+
+User project.
+
+Optional - needed only for requester pays.
+
+Properties:
+
+- Config: user_project
+- Env Var: RCLONE_GCS_USER_PROJECT
+- Type: string
+- Required: false
+
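+For example, to bill access to a requester pays bucket to one of your
+own projects (a sketch; the remote, bucket and project names are
+assumptions):
+
+ rclone ls gcs:requester-pays-bucket --gcs-user-project myproject
+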
#### --gcs-service-account-file
Service Account Credentials JSON file path.
@@ -28625,6 +29270,21 @@ Properties:
- Type: string
- Required: false
+#### --gcs-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option creates an
+empty object ending with "/" to persist the folder.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_GCS_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
#### --gcs-no-check-bucket
If set, don't attempt to check the bucket exists or create it.
@@ -28919,12 +29579,9 @@ There's a few steps we need to go through to accomplish this:
[Google Developer Console](https://console.developers.google.com).
- You must have a project - create one if you don't.
- Then go to "IAM & admin" -> "Service Accounts".
- - Use the "Create Credentials" button. Fill in "Service account name"
-with something that identifies your client. "Role" can be empty.
- - Tick "Furnish a new private key" - select "Key type JSON".
- - Tick "Enable G Suite Domain-wide Delegation". This option makes
-"impersonation" possible, as documented here:
-[Delegating domain-wide authority to the service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#delegatingauthority)
+ - Use the "Create Service Account" button. Fill in "Service account name"
+and "Service account ID" with something that identifies your client.
+ - Select "Create And Continue". Steps 2 and 3 are optional.
- These credentials are what rclone will use for authentication.
If you ever need to remove access, press the "Delete service
account key" button.
@@ -29098,7 +29755,7 @@ like a symlink in unix, except they point to the underlying file data
(e.g. the inode in unix terms) so they don't break if the source is
renamed or moved about.
-Be default rclone treats these as follows.
+By default rclone treats these as follows.
For shortcuts pointing to files:
@@ -29761,6 +30418,8 @@ Properties:
#### --drive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
@@ -29910,6 +30569,24 @@ Properties:
- Type: MultiEncoder
- Default: InvalidUtf8
+#### --drive-env-auth
+
+Get IAM credentials from runtime (environment variables or instance metadata if no env vars).
+
+Only applies if service_account_file and service_account_credentials are blank.
+
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_DRIVE_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+ - "false"
+ - Enter credentials in the next step.
+ - "true"
+ - Get GCP IAM credentials from the environment (env vars or IAM).
+
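+For example, on a GCE instance with a suitable service account attached
+(a sketch; `gdrive` is an assumed remote configured with
+`env_auth = true`):
+
+ rclone lsd gdrive: --drive-env-auth
+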
## Backend commands
Here are the commands specific to the drive backend.
@@ -31060,7 +31737,7 @@ directory, usually `~/.cache/rclone/kv/`. Databases are maintained
one per _base_ backend, named like `BaseRemote~hasher.bolt`.
Checksums for multiple `alias`-es into a single base backend
will be stored in the single database. All local paths are treated as
-aliases into the `local` backend (unless crypted or chunked) and stored
+aliases into the `local` backend (unless encrypted or chunked) and stored
in `~/.cache/rclone/kv/local~hasher.bolt`.
Databases can be shared between multiple rclone processes.
@@ -31265,9 +31942,9 @@ Properties:
Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
-checks, and wire encryption is required when communicating the the
-datanodes. Possible values are 'authentication', 'integrity' and
-'privacy'. Used only with KERBEROS enabled.
+checks, and wire encryption are required when communicating with
+the datanodes. Possible values are 'authentication', 'integrity'
+and 'privacy'. Used only with KERBEROS enabled.
Properties:
@@ -32489,7 +33166,7 @@ Small files will be cached in memory - see the
[--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
-true for crypted remotes (in older versions the crypt backend would not
+true for encrypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).
@@ -33630,7 +34307,7 @@ Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
-HTTPS is normally not necesary since all data is already encrypted anyway.
+HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.
Properties:
@@ -34160,6 +34837,12 @@ It reads configuration from these variables, in the following order:
- `AZURE_CLIENT_ID`: client ID of the application the user will authenticate to
- `AZURE_USERNAME`: a username (usually an email address)
- `AZURE_PASSWORD`: the user's password
+4. Workload Identity
+ - `AZURE_TENANT_ID`: Tenant to authenticate in.
+ - `AZURE_CLIENT_ID`: Client ID of the application the user will authenticate to.
+ - `AZURE_FEDERATED_TOKEN_FILE`: Path to projected service account token file.
+ - `AZURE_AUTHORITY_HOST`: Authority of an Azure Active Directory endpoint (default: login.microsoftonline.com).
+
##### Env Auth: 2. Managed Service Identity Credentials
@@ -34187,7 +34870,7 @@ Then you could access rclone resources like this:
Or
- rclone lsf --azureblob-env-auth --azureblob-acccount=ACCOUNT :azureblob:CONTAINER
+ rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
Which is analogous to using the `az` tool:
@@ -34784,6 +35467,24 @@ Properties:
- "container"
- Allow full public read access for container and blob data.
+#### --azureblob-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with "/" to persist the folder.
+
+This object also has the metadata "hdi_isfolder = true" to conform to
+the Microsoft standard.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_AZUREBLOB_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
#### --azureblob-no-check-container
If set, don't attempt to check the container exists or create it.
@@ -35277,6 +35978,8 @@ Properties:
#### --onedrive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
@@ -35380,7 +36083,7 @@ Properties:
Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
+default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -35417,6 +36120,30 @@ Properties:
- "none"
- None - don't use any hashes
+#### --onedrive-av-override
+
+Allows download of files the server thinks have a virus.
+
+The onedrive/sharepoint server may check uploaded files with an
+anti-virus checker. If it detects any potential viruses or malware it will
+block download of the file.
+
+In this case you will see a message like this
+
+ server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+
+If you are 100% sure you want to download this file anyway then use
+the --onedrive-av-override flag, or av_override = true in the config
+file.
+
+
+Properties:
+
+- Config: av_override
+- Env Var: RCLONE_ONEDRIVE_AV_OVERRIDE
+- Type: bool
+- Default: false
+
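+For example, if you are sure a flagged file is safe (a usage sketch;
+`onedrive` and the file name are assumptions):
+
+ rclone copy onedrive:path/to/flagged-file /tmp/ --onedrive-av-override
+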
#### --onedrive-encoding
The encoding for the backend.
@@ -36444,7 +37171,7 @@ Properties:
#### --oos-sse-kms-key-id
-if using using your own master key in vault, this header specifies the
+if using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
@@ -37087,6 +37814,7 @@ Commercial implementations of that being:
* [Memset Memstore](https://www.memset.com/cloud/storage/)
* [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
+ * [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
Paths are specified as `remote:container` (or `remote:` for the `lsd`
@@ -37110,7 +37838,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
-XX / OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
\ "swift"
[snip]
Storage> swift
@@ -37139,6 +37867,8 @@ Choose a number from below, or type in your own value
\ "https://auth.storage.memset.com/v2.0"
6 / OVH
\ "https://auth.cloud.ovh.net/v3"
+ 7 / Blomp Cloud Storage
+ \ "https://authenticate.ain.net"
auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
user_id>
@@ -37320,7 +38050,7 @@ as they can't be used in JSON strings.
### Standard options
-Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)).
+Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
#### --swift-env-auth
@@ -37384,6 +38114,8 @@ Properties:
- Memset Memstore UK v2
- "https://auth.cloud.ovh.net/v3"
- OVH
+ - "https://authenticate.ain.net"
+ - Blomp Cloud Storage
#### --swift-user-id
@@ -37560,7 +38292,7 @@ Properties:
### Advanced options
-Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)).
+Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
#### --swift-leave-parts-on-error
@@ -37983,6 +38715,306 @@ Properties:
+# PikPak
+
+PikPak is [a private cloud drive](https://mypikpak.com/).
+
+Paths are specified as `remote:path`, and may be as deep as required, e.g. `remote:directory/subdirectory`.
+
+## Configuration
+
+Here is an example of making a remote for PikPak.
+
+First run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> remote
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+XX / PikPak
+ \ (pikpak)
+Storage> XX
+
+Option user.
+Pikpak username.
+Enter a value.
+user> USERNAME
+
+Option pass.
+Pikpak password.
+Choose an alternative below.
+y) Yes, type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n>
+
+Configuration complete.
+Options:
+- type: pikpak
+- user: USERNAME
+- pass: *** ENCRYPTED ***
+- token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
+Keep this "remote" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+
+### Standard options
+
+Here are the Standard options specific to pikpak (PikPak).
+
+#### --pikpak-user
+
+Pikpak username.
+
+Properties:
+
+- Config: user
+- Env Var: RCLONE_PIKPAK_USER
+- Type: string
+- Required: true
+
+#### --pikpak-pass
+
+Pikpak password.
+
+**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_PIKPAK_PASS
+- Type: string
+- Required: true
+
+### Advanced options
+
+Here are the Advanced options specific to pikpak (PikPak).
+
+#### --pikpak-client-id
+
+OAuth Client Id.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PIKPAK_CLIENT_ID
+- Type: string
+- Required: false
+
+#### --pikpak-client-secret
+
+OAuth Client Secret.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+- Type: string
+- Required: false
+
+#### --pikpak-token
+
+OAuth Access Token as a JSON blob.
+
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PIKPAK_TOKEN
+- Type: string
+- Required: false
+
+#### --pikpak-auth-url
+
+Auth server URL.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PIKPAK_AUTH_URL
+- Type: string
+- Required: false
+
+#### --pikpak-token-url
+
+Token server url.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PIKPAK_TOKEN_URL
+- Type: string
+- Required: false
+
+#### --pikpak-root-folder-id
+
+ID of the root folder.
+Leave blank normally.
+
+Fill in for rclone to use a non-root folder as its starting point.
+
+
+Properties:
+
+- Config: root_folder_id
+- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+- Type: string
+- Required: false
+
+#### --pikpak-use-trash
+
+Send files to the trash instead of deleting permanently.
+
+Defaults to true, namely sending files to the trash.
+Use `--pikpak-use-trash=false` to delete files permanently instead.
+
+Properties:
+
+- Config: use_trash
+- Env Var: RCLONE_PIKPAK_USE_TRASH
+- Type: bool
+- Default: true
+
+#### --pikpak-trashed-only
+
+Only show files that are in the trash.
+
+This will show trashed files in their original directory structure.
+
+Properties:
+
+- Config: trashed_only
+- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+- Type: bool
+- Default: false
+
+#### --pikpak-hash-memory-limit
+
+Files bigger than this will be cached on disk to calculate the hash if required.
+
+Properties:
+
+- Config: hash_memory_limit
+- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+- Type: SizeSuffix
+- Default: 10Mi
+
+#### --pikpak-encoding
+
+The encoding for the backend.
+
+See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PIKPAK_ENCODING
+- Type: MultiEncoder
+- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+
+## Backend commands
+
+Here are the commands specific to the pikpak backend.
+
+Run them with
+
+ rclone backend COMMAND remote:
+
+The help below will explain what arguments each command takes.
+
+See the [backend](https://rclone.org/commands/rclone_backend/) command for more
+info on how to pass options and arguments.
+
+These can be run on a running backend using the rc command
+[backend/command](https://rclone.org/rc/#backend-command).
+
+### addurl
+
+Add offline download task for url
+
+ rclone backend addurl remote: [options] [<arguments>+]
+
+This command adds an offline download task for a URL.
+
+Usage:
+
+ rclone backend addurl pikpak:dirpath url
+
+Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
+the download will fall back to the default 'My Pack' folder.
+
+
+### decompress
+
+Request decompress of a file/files in a folder
+
+ rclone backend decompress remote: [options] [<arguments>+]
+
+This command requests decompression of a file or files in a folder.
+
+Usage:
+
+ rclone backend decompress pikpak:dirpath {filename} -o password=password
+ rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+
+An optional argument 'filename' can be specified for a file located in
+'pikpak:dirpath'. You may want to pass '-o password=password' for
+password-protected files. Also, pass '-o delete-src-file' to delete
+source files after decompression has finished.
+
+Result:
+
+ {
+ "Decompressed": 17,
+ "SourceDeleted": 0,
+ "Errors": 0
+ }
+
+
+
+
+## Limitations ##
+
+### Hashes ###
+
+PikPak supports MD5 hashes, but they are sometimes empty, especially for user-uploaded files.
+
+### Deleted files ###
+
+Deleted files will still be visible with `--pikpak-trashed-only` even after the trash is emptied. This goes away after a few days.
+
# premiumize.me
Paths are specified as `remote:path`
@@ -39503,7 +40535,7 @@ Pass multiple variables space separated, eg
VAR1=value VAR2=value
-and pass variables with spaces in in quotes, eg
+and pass variables with spaces in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
@@ -39574,6 +40606,26 @@ Properties:
- Type: SpaceSepList
- Default:
+#### --sftp-host-key-algorithms
+
+Space separated list of host key algorithms, ordered by preference.
+
+At least one must match the server configuration. This can be checked, for example, using `ssh -Q HostKeyAlgorithms`.
+
+Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
+
+Example:
+
+ ssh-ed25519 ssh-rsa ssh-dss
+
+
+Properties:
+
+- Config: host_key_algorithms
+- Env Var: RCLONE_SFTP_HOST_KEY_ALGORITHMS
+- Type: SpaceSepList
+- Default:
+
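+For example, on the command line (a sketch; `sftp-remote` is an assumed
+remote):
+
+ rclone lsd sftp-remote: --sftp-host-key-algorithms "ssh-ed25519 ssh-rsa"
+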
## Limitations
@@ -39623,7 +40675,7 @@ command.) You may put subdirectories in too, e.g. `remote:item/path/to/dir`.
## Notes
The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in `smb.conf` (usually in `/etc/samba/`) file.
-You can find shares by quering the root if you're unsure (e.g. `rclone lsd remote:`).
+You can find shares by querying the root if you're unsure (e.g. `rclone lsd remote:`).
You can't access to the shared printers from rclone, obviously.
@@ -40641,7 +41693,8 @@ To copy a local directory to an Uptobox directory called backup
### Modified time and hashes
-Uptobox supports neither modified times nor checksums.
+Uptobox supports neither modified times nor checksums. All timestamps
+will read as the time set by `--default-time`.
### Restricted filename characters
@@ -40678,6 +41731,17 @@ Properties:
Here are the Advanced options specific to uptobox (Uptobox).
+#### --uptobox-private
+
+Set to make uploaded files private
+
+Properties:
+
+- Config: private
+- Env Var: RCLONE_UPTOBOX_PRIVATE
+- Type: bool
+- Default: false
+
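+For example (a usage sketch; `uptobox` is an assumed remote name):
+
+ rclone copy /path/to/file uptobox:backup --uptobox-private
+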
#### --uptobox-encoding
The encoding for the backend.
@@ -40997,17 +42061,19 @@ Choose a number from below, or type in your own value
url> https://example.com/remote.php/webdav/
Name of the WebDAV site/service/software you are using
Choose a number from below, or type in your own value
- 1 / Nextcloud
- \ "nextcloud"
- 2 / Owncloud
- \ "owncloud"
- 3 / Sharepoint Online, authenticated by Microsoft account.
- \ "sharepoint"
- 4 / Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
- \ "sharepoint-ntlm"
- 5 / Other site/service or software
- \ "other"
-vendor> 1
+ 1 / Fastmail Files
+ \ (fastmail)
+ 2 / Nextcloud
+ \ (nextcloud)
+ 3 / Owncloud
+ \ (owncloud)
+ 4 / Sharepoint Online, authenticated by Microsoft account
+ \ (sharepoint)
+ 5 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
+ \ (sharepoint-ntlm)
+ 6 / Other site/service or software
+ \ (other)
+vendor> 2
User name
user> user
Password.
@@ -41054,10 +42120,10 @@ To copy a local directory to an WebDAV directory called backup
### Modified time and hashes ###
Plain WebDAV does not support modified times. However when used with
-Owncloud or Nextcloud rclone will support modified times.
+Fastmail Files, Owncloud or Nextcloud rclone will support modified times.
Likewise plain WebDAV does not support hashes, however when used with
-Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
+Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
Depending on the exact version of Owncloud or Nextcloud hashes may
appear on all objects, or only on objects which had a hash uploaded
with them.
@@ -41091,6 +42157,8 @@ Properties:
- Type: string
- Required: false
- Examples:
+ - "fastmail"
+ - Fastmail Files
- "nextcloud"
- Nextcloud
- "owncloud"
@@ -41190,12 +42258,50 @@ Properties:
- Type: CommaSepList
- Default:
+#### --webdav-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_WEBDAV_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
+#### --webdav-nextcloud-chunk-size
+
+Nextcloud upload chunk size.
+
+We recommend configuring your Nextcloud instance to increase the max chunk size to 1 GB for better upload performance.
+See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
+
+Set to 0 to disable chunked uploading.
+
+
+Properties:
+
+- Config: nextcloud_chunk_size
+- Env Var: RCLONE_WEBDAV_NEXTCLOUD_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
+
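+For example, if your Nextcloud server has been configured to accept
+1 GB chunks (a sketch; `nextcloud` is an assumed remote name):
+
+ rclone copy big.iso nextcloud:backup --webdav-nextcloud-chunk-size 1Gi
+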
## Provider notes
See below for notes on specific providers.
+### Fastmail Files
+
+Use `https://webdav.fastmail.com/` or a subdirectory as the URL,
+and your Fastmail email `username@domain.tld` as the username.
+Follow [this documentation](https://www.fastmail.help/hc/en-us/articles/360058752854-App-passwords)
+to create an app password with access to `Files (WebDAV)` and use
+this as the password.
+
+Fastmail supports modified times using the `X-OC-Mtime` header.
+
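+A working Fastmail remote might then look like this in the config file
+(a sketch; the username is a placeholder and the password is the
+obscured app password):
+
+```
+[fastmail]
+type = webdav
+url = https://webdav.fastmail.com/
+vendor = fastmail
+user = username@domain.tld
+pass = *** obscured app password ***
+```
+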
### Owncloud
Click on the settings cog in the bottom right of the page and this
@@ -42464,6 +43570,196 @@ Options:
# Changelog
+## v1.63.0 - 2023-06-30
+
+[See commits](https://github.com/rclone/rclone/compare/v1.62.0...v1.63.0)
+
+* New backends
+ * [Pikpak](https://rclone.org/pikpak/) (wiserain)
+ * New S3 providers
+ * [petabox.io](https://rclone.org/s3/#petabox) (Andrei Smirnov)
+ * [Google Cloud Storage](https://rclone.org/s3/#google-cloud-storage) (Anthony Pessy)
+ * New WebDAV providers
+ * [Fastmail](https://rclone.org/webdav/#fastmail-files) (Arnavion)
+* Major changes
+ * Files will be copied to a temporary name ending in `.partial` when copying to `local`, `ftp` or `sftp`, then renamed at the end of the transfer. (Janne Hellsten, Nick Craig-Wood)
+ * This helps with data integrity as we don't delete the existing file until the new one is complete.
+ * It can be disabled with the [--inplace](https://rclone.org/docs/#inplace) flag.
+ * This behaviour will also happen if the backend is wrapped, for example `sftp` wrapped with `crypt`.
+ * The [s3](https://rclone.org/s3/#s3-directory-markers), [azureblob](/azureblob/#azureblob-directory-markers) and [gcs](/googlecloudstorage/#gcs-directory-markers) backends now support directory markers so empty directories are supported (Jānis Bebrītis, Nick Craig-Wood)
+ * The [--default-time](https://rclone.org/docs/#default-time-time) flag now controls the unknown modification time of files/dirs (Nick Craig-Wood)
+ * If a file or directory does not have a modification time rclone can read then rclone will display this fixed time instead.
+ * For the old behaviour use `--default-time 0s` which will set this time to the time rclone started up.
+* New Features
+ * build
+ * Modernise linters in use and fixup all affected code (albertony)
+ * Push docker beta to GHCR (GitHub container registry) (Richard Tweed)
+ * cat: Add `--separator` option to cat command (Loren Gordon)
+ * config
+ * Do not remove/overwrite other files during config file save (albertony)
+ * Do not overwrite config file symbolic link (albertony)
+ * Stop `config create` making invalid config files (Nick Craig-Wood)
+ * doc updates (Adam K, Aditya Basu, albertony, asdffdsazqqq, Damo, danielkrajnik, Dimitri Papadopoulos, dlitster, Drew Parsons, jumbi77, kapitainsky, mac-15, Mariusz Suchodolski, Nick Craig-Wood, NickIAm, Rintze Zelle, Stanislav Gromov, Tareq Sharafy, URenko, yuudi, Zach Kipp)
+ * fs
+ * Add `size` to JSON logs when moving or copying an object (Nick Craig-Wood)
+ * Allow boolean features to be enabled with `--disable !Feature` (Nick Craig-Wood)
+ * genautocomplete: Rename to `completion` with alias to the old name (Nick Craig-Wood)
+ * librclone: Added example on using `librclone` with Go (alankrit)
+ * lsjson: Make `--stat` more efficient (Nick Craig-Wood)
+ * operations
+ * Implement `--multi-thread-write-buffer-size` for speed improvements on downloads (Paulo Schreiner)
+ * Reopen downloads on error when using `check --download` and `cat` (Nick Craig-Wood)
+ * rc: `config/listremotes` includes remotes defined with environment variables (kapitainsky)
+ * selfupdate: Obey `--no-check-certificate` flag (Nick Craig-Wood)
+ * serve restic: Trigger systemd notify (Shyim)
+ * serve webdav: Implement owncloud checksum and modtime extensions (WeidiDeng)
+ * sync: `--suffix-keep-extension` preserve 2 part extensions like .tar.gz (Nick Craig-Wood)
+* Bug Fixes
+ * accounting
+ * Fix Prometheus metrics to be the same as `core/stats` (Nick Craig-Wood)
+ * Bwlimit signal handler should always start (Sam Lai)
+ * bisync: Fix `maxDelete` parameter being ignored via the rc (Nick Craig-Wood)
+ * cmd/ncdu: Fix screen corruption when logging (eNV25)
+ * filter: Fix deadlock with errors on `--files-from` (douchen)
+ * fs
+ * Fix interaction between `--progress` and `--interactive` (Nick Craig-Wood)
+ * Fix infinite recursive call in pacer ModifyCalculator (fixes issue reported by the staticcheck linter) (albertony)
+ * lib/atexit: Ensure OnError only calls cancel function once (Nick Craig-Wood)
+ * lib/rest: Fix problems re-using HTTP connections (Nick Craig-Wood)
+ * rc
+ * Fix `operations/stat` with trailing `/` (Nick Craig-Wood)
+ * Fix missing `--rc` flags (Nick Craig-Wood)
+ * Fix output of Time values in `options/get` (Nick Craig-Wood)
+ * serve dlna: Fix potential data race (Nick Craig-Wood)
+ * version: Fix reported os/kernel version for windows (albertony)
+* Mount
+ * Add `--mount-case-insensitive` to force the mount to be case insensitive (Nick Craig-Wood)
+ * Removed unnecessary byte slice allocation for reads (Anagh Kumar Baranwal)
+ * Clarify rclone mount error when installed via homebrew (Nick Craig-Wood)
+ * Added _netdev to the example mount so it gets treated as a remote-fs rather than local-fs (Anagh Kumar Baranwal)
+* Mount2
+ * Updated go-fuse version (Anagh Kumar Baranwal)
+ * Fixed statfs (Anagh Kumar Baranwal)
+ * Disable xattrs (Anagh Kumar Baranwal)
+* VFS
+ * Add MkdirAll function to make a directory and all beneath (Nick Craig-Wood)
+ * Fix reload: failed to add virtual dir entry: file does not exist (Nick Craig-Wood)
+ * Fix writing to a read only directory creating spurious directory entries (WeidiDeng)
+ * Fix potential data race (Nick Craig-Wood)
+ * Fix backends being Shutdown too early when startup takes a long time (Nick Craig-Wood)
+* Local
+ * Fix filtering of symlinks with `-l`/`--links` flag (Nick Craig-Wood)
+ * Fix /path/to/file.rclonelink when `-l`/`--links` is in use (Nick Craig-Wood)
+ * Fix crash with `--metadata` on Android (Nick Craig-Wood)
+* Cache
+ * Fix backends shutting down when in use when used via the rc (Nick Craig-Wood)
+* Crypt
+ * Add `--crypt-suffix` option to set a custom suffix for encrypted files (jladbrook)
+ * Add `--crypt-pass-bad-blocks` to allow corrupted file output (Nick Craig-Wood)
+ * Fix reading 0 length files (Nick Craig-Wood)
+ * Try not to return "unexpected EOF" error (Nick Craig-Wood)
+ * Reduce allocations (albertony)
+ * Recommend Dropbox for `base32768` encoding (Nick Craig-Wood)
+* Azure Blob
+ * Empty directory markers (Nick Craig-Wood)
+ * Support azure workload identities (Tareq Sharafy)
+ * Fix azure blob uploads with multiple bits of metadata (Nick Craig-Wood)
+ * Fix azurite compatibility by sending nil tier if set to empty string (Roel Arents)
+* Combine
+ * Implement missing methods (Nick Craig-Wood)
+ * Fix goroutine stack overflow on bad object (Nick Craig-Wood)
+* Drive
+ * Add `--drive-env-auth` to get IAM credentials from runtime (Peter Brunner)
+ * Update drive service account guide (Juang, Yi-Lin)
+ * Fix change notify picking up files outside the root (Nick Craig-Wood)
+ * Fix trailing slash mis-identification of folder as file (Nick Craig-Wood)
+ * Fix incorrect remote after Update on object (Nick Craig-Wood)
+* Dropbox
+ * Implement `--dropbox-pacer-min-sleep` flag (Nick Craig-Wood)
+ * Fix the dropbox batcher stalling (Misty)
+* Fichier
+ * Add `--fichier-cdn` option to use the CDN for download (Nick Craig-Wood)
+* FTP
+ * Lower log message priority when `SetModTime` is not supported to debug (Tobias Gion)
+ * Fix "unsupported LIST line" errors on startup (Nick Craig-Wood)
+ * Fix "501 Not a valid pathname." errors when creating directories (Nick Craig-Wood)
+* Google Cloud Storage
+ * Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ * Added `--gcs-user-project` needed for requester pays (Christopher Merry)
+* HTTP
+ * Add client certificate user auth middleware. This can auth `serve restic` from the username in the client cert. (Peter Fern)
+* Jottacloud
+ * Fix vfs writeback stuck in a failed upload loop with file versioning disabled (albertony)
+* Onedrive
+ * Add `--onedrive-av-override` flag to download files flagged as virus (Nick Craig-Wood)
+ * Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
+ * Report any list errors during `rclone cleanup` (albertony)
+* Putio
+ * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+ * Fix modification times not being preserved for server side copy and move (Nick Craig-Wood)
+ * Fix server side copy failures (400 errors) (Nick Craig-Wood)
+* S3
+ * Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ * Update Scaleway storage classes (Brian Starkey)
+ * Fix `--s3-versions` on individual objects (Nick Craig-Wood)
+ * Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood)
+ * Fix missing "tier" metadata (Nick Craig-Wood)
+ * Fix V3sign: add missing subresource delete (cc)
+ * Fix Arvancloud Domain and region changes and alphabetise the provider (Ehsan Tadayon)
+ * Fix Qiniu KODO quirks virtualHostStyle is false (zzq)
+* SFTP
+ * Add `--sftp-host-key-algorithms` to allow specifying SSH host key algorithms (Joel)
+ * Fix using `--sftp-key-use-agent` and `--sftp-key-file` together needing private key file (Arnav Singh)
+ * Fix move to allow overwriting existing files (Nick Craig-Wood)
+ * Don't stat directories before listing them (Nick Craig-Wood)
+ * Don't check remote points to a file if it ends with / (Nick Craig-Wood)
+* Sharefile
+ * Disable streamed transfers as they no longer work (Nick Craig-Wood)
+* Smb
+ * Code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) (albertony)
+* Storj
+ * Fix "uplink: too many requests" errors when uploading to the same file (Nick Craig-Wood)
+ * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+* Swift
+ * Ignore 404 error when deleting an object (Nick Craig-Wood)
+* Union
+ * Implement missing methods (Nick Craig-Wood)
+ * Allow errors to be unwrapped for inspection (Nick Craig-Wood)
+* Uptobox
+ * Add `--uptobox-private` flag to make all uploaded files private (Nick Craig-Wood)
+ * Fix improper regex (Aaron Gokaslan)
+ * Fix Update returning the wrong object (Nick Craig-Wood)
+ * Fix rmdir declaring that directories weren't empty (Nick Craig-Wood)
+* WebDAV
+ * nextcloud: Add support for chunked uploads (Paul)
+ * Set modtime using propset for owncloud and nextcloud (WeidiDeng)
+ * Make pacer minSleep configurable with `--webdav-pacer-min-sleep` (ed)
+ * Fix server side copy/move not overwriting (WeidiDeng)
+ * Fix modtime on server side copy for owncloud and nextcloud (Nick Craig-Wood)
+* Yandex
+ * Fix 400 Bad Request on transfer failure (Nick Craig-Wood)
+* Zoho
+ * Fix downloads with `Range:` header returning the wrong data (Nick Craig-Wood)
+
+## v1.62.2 - 2023-03-16
+
+[See commits](https://github.com/rclone/rclone/compare/v1.62.1...v1.62.2)
+
+* Bug Fixes
+ * docker volume plugin: Add missing fuse3 dependency (Nick Craig-Wood)
+ * docs: Fix size documentation (asdffdsazqqq)
+* FTP
+ * Fix 426 errors on downloads with vsftpd (Lesmiscore)
+
+## v1.62.1 - 2023-03-15
+
+[See commits](https://github.com/rclone/rclone/compare/v1.62.0...v1.62.1)
+
+* Bug Fixes
+ * docker: Add missing fuse3 dependency (cycneuramus)
+ * build: Update release docs to be more careful with the tag (Nick Craig-Wood)
+ * build: Set Github release to draft while uploading binaries (Nick Craig-Wood)
+
## v1.62.0 - 2023-03-14
[See commits](https://github.com/rclone/rclone/compare/v1.61.0...v1.62.0)
@@ -44460,8 +45756,8 @@ all the docs and Edward Barker for helping re-write the front page.
* Use proper import path go.etcd.io/bbolt (Robert-André Mauchin)
* Crypt
* Calculate hashes for uploads from local disk (Nick Craig-Wood)
- * This allows crypted Jottacloud uploads without using local disk
- * This means crypted s3/b2 uploads will now have hashes
+ * This allows encrypted Jottacloud uploads without using local disk
+ * This means encrypted s3/b2 uploads will now have hashes
* Added `rclone backend decode`/`encode` commands to replicate functionality of `cryptdecode` (Anagh Kumar Baranwal)
* Get rid of the unused Cipher interface as it obfuscated the code (Nick Craig-Wood)
* Azure Blob
@@ -45671,7 +46967,7 @@ Point release to fix hubic and azureblob backends.
* Fix panic when running without plex configs (Remus Bunduc)
* Fix root folder caching (Remus Bunduc)
* Crypt
- * Check the crypted hash of files when uploading for extra data security
+ * Check the encrypted hash of files when uploading for extra data security
* Dropbox
* Make Dropbox for business folders accessible using an initial `/` in the path
* Google Cloud Storage
@@ -46026,7 +47322,7 @@ Point release to fix hubic and azureblob backends.
* New commands
* `rcat` - read from standard input and stream upload
* `tree` - shows a nicely formatted recursive listing
- * `cryptdecode` - decode crypted file names (thanks ishuah)
+ * `cryptdecode` - decode encrypted file names (thanks ishuah)
* `config show` - print the config file
* `config file` - print the config file location
* New Features
@@ -46330,7 +47626,7 @@ Point release to fix hubic and azureblob backends.
* Fix `rclone move` command
* Delete src files which already existed in dst
* Fix deletion of src file when dst file older
- * Fix `rclone check` on crypted file systems
+ * Fix `rclone check` on encrypted file systems
* Make failed uploads not count as "Transferred"
* Make sure high level retries show with `-q`
* Use a vendor directory with godep for repeatable builds
@@ -47109,9 +48405,29 @@ If you are using `systemd-resolved` (default on Arch Linux), ensure it
is at version 233 or higher. Previous releases contain a bug which
causes not all domains to be resolved properly.
-Additionally with the `GODEBUG=netdns=` environment variable the Go
-resolver decision can be influenced. This also allows to resolve certain
-issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
+
+The Go resolver decision can be influenced with the `GODEBUG=netdns=...`
+environment variable. This also makes it possible to resolve certain issues
+with DNS resolution. On Windows or macOS systems, try forcing use of the
+internal Go resolver by setting `GODEBUG=netdns=go` at runtime. On
+other systems (Linux, \*BSD, etc) try forcing use of the system
+name resolver by setting `GODEBUG=netdns=cgo` (and recompile rclone
+from source with CGO enabled if necessary). See the
+[name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
+
+### Failed to start auth webserver on Windows ###
+```
+Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+...
+yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+```
+
+This is sometimes caused by the Host Network Service interfering with opening the port on the host.
+
+A simple solution may be restarting the Host Network Service, e.g. with PowerShell:
+```
+Restart-Service hns
+```
### The total size reported in the stats for a sync is wrong and keeps changing
@@ -47191,7 +48507,7 @@ Authors
Contributors
------------
-{{< rem `email addresses removed from here need to be addeed to
+{{< rem `email addresses removed from here need to be added to
bin/.ignore-emails to make sure update-authors.py doesn't immediately
put them back in again.` >}}
@@ -47765,7 +49081,7 @@ put them back in again.` >}}
* Leroy van Logchem
* Zsolt Ero
* Lesmiscore
- * ehsantdy
+ * ehsantdy
* SwazRGB <65694696+swazrgb@users.noreply.github.com>
* Mateusz Puczyński
* Michael C Tiernan - MIT-Research Computing Project
@@ -47775,6 +49091,7 @@ put them back in again.` >}}
* Christian Galo <36752715+cgalo5758@users.noreply.github.com>
* Erik van Velzen
* Derek Battams
+ * Paul
* SimonLiu
* Hugo Laloge
* Mr-Kanister <68117355+Mr-Kanister@users.noreply.github.com>
@@ -47873,6 +49190,48 @@ put them back in again.` >}}
* Peter Brunner
* Leandro Sacchet
* dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+ * cycneuramus <56681631+cycneuramus@users.noreply.github.com>
+ * Arnavion
+ * Christopher Merry
+ * Thibault Coupin
+ * Richard Tweed
+ * Zach Kipp
+ * yuudi <26199752+yuudi@users.noreply.github.com>
+ * NickIAm
+ * Juang, Yi-Lin
+ * jumbi77
+ * Aditya Basu
+ * ed
+ * Drew Parsons
+ * Joel
+ * wiserain
+ * Roel Arents
+ * Shyim
+ * Rintze Zelle <78232505+rzelle-lallemand@users.noreply.github.com>
+ * Damo
+ * WeidiDeng
+ * Brian Starkey
+ * jladbrook
+ * Loren Gordon
+ * dlitster
+ * Tobias Gion
+ * Jānis Bebrītis
+ * Adam K
+ * Andrei Smirnov
+ * Janne Hellsten
+ * cc <12904584+shvc@users.noreply.github.com>
+ * Tareq Sharafy
+ * kapitainsky
+ * douchen
+ * Sam Lai <70988+slai@users.noreply.github.com>
+ * URenko <18209292+URenko@users.noreply.github.com>
+ * Stanislav Gromov
+ * Paulo Schreiner
+ * Mariusz Suchodolski
+ * danielkrajnik
+ * Peter Fern
+ * zzq
+ * mac-15
# Contact the rclone project #
diff --git a/MANUAL.txt b/MANUAL.txt
index 2a6915092..54b10d5af 100644
--- a/MANUAL.txt
+++ b/MANUAL.txt
@@ -1,6 +1,6 @@
rclone(1) User Manual
Nick Craig-Wood
-Mar 14, 2023
+Jun 30, 2023
Rclone syncs your files to cloud storage
@@ -107,6 +107,7 @@ S3, that work out of the box.)
- Dreamhost
- Dropbox
- Enterprise File Fabric
+- Fastmail Files
- FTP
- Google Cloud Storage
- Google Drive
@@ -131,12 +132,15 @@ S3, that work out of the box.)
- Minio
- Nextcloud
- OVH
+- Blomp Cloud Storage
- OpenDrive
- OpenStack Swift
- Oracle Cloud Storage Swift
- Oracle Object Storage
- ownCloud
- pCloud
+- Petabox
+- PikPak
- premiumize.me
- put.io
- QingStor
@@ -325,8 +329,14 @@ Windows package manager (Winget)
Winget comes pre-installed with the latest versions of Windows. If not,
update the App Installer package from the Microsoft store.
+To install rclone
+
winget install Rclone.Rclone
+To uninstall rclone
+
+ winget uninstall Rclone.Rclone --force
+
Chocolatey package manager
Make sure you have Choco installed
@@ -431,10 +441,16 @@ Here are some commands tested on an Ubuntu 18.04.3 host:
# config on host at ~/.config/rclone/rclone.conf
# data on host at ~/data
+ # add a remote interactively
+ docker run --rm -it \
+ --volume ~/.config/rclone:/config/rclone \
+ --user $(id -u):$(id -g) \
+ rclone/rclone \
+ config
+
# make sure the config is ok by listing the remotes
docker run --rm \
--volume ~/.config/rclone:/config/rclone \
- --volume ~/data:/data:shared \
--user $(id -u):$(id -g) \
rclone/rclone \
listremotes
@@ -765,10 +781,12 @@ See the following for detailed instructions for
- Memory
- Microsoft Azure Blob Storage
- Microsoft OneDrive
-- OpenStack Swift / Rackspace Cloudfiles / Memset Memstore
+- OpenStack Swift / Rackspace Cloudfiles / Blomp Cloud Storage /
+ Memset Memstore
- OpenDrive
- Oracle Object Storage
- Pcloud
+- PikPak
- premiumize.me
- put.io
- QingStor
@@ -1146,7 +1164,7 @@ and hashes (MD5 or SHA1) and logs a report of files that don't match. It
doesn't alter the source or destination.
For the crypt remote there is a dedicated command, cryptcheck, that are
-able to check the checksums of the crypted files.
+able to check the checksums of the encrypted files.
If you supply the --size-only flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.
@@ -1185,6 +1203,9 @@ what happened to it. These are reminiscent of diff files.
- ! path means there was an error reading or hashing the source or
dest.
+The default number of parallel checks is 8. See the --checkers=N option
+for more information.
+
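+For example, to double the default number of parallel checks:
+
+ rclone check --checkers 16 source:path dest:path
+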
rclone check source:path dest:path [flags]
Options
@@ -1472,7 +1493,7 @@ as JSON instead.
Recurses by default, use --max-depth 1 to stop the recursion.
Some backends do not always provide file sizes, see for example Google
-Photos and Google Drive. Rclone will then show a notice in the log
+Photos and Google Docs. Rclone will then show a notice in the log
indicating how many such files were encountered, and count them in as
empty files in the output of the size command.
@@ -1906,16 +1927,29 @@ the end and --offset and --count to print a section in the middle. Note
that if offset is negative it will count from the end, so
--offset -1 --count 1 is equivalent to --tail 1.
+Use the --separator flag to print a separator value between files. Be
+sure to shell-escape special characters. For example, to print a newline
+between files, use:
+
+- bash:
+
+ rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
+
+- powershell:
+
+ rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
+
rclone cat remote:path [flags]
Options
- --count int Only print N characters (default -1)
- --discard Discard the output instead of printing
- --head int Only print the first N characters
- -h, --help help for cat
- --offset int Start printing at offset N (or from end if -ve)
- --tail int Only print the last N characters
+ --count int Only print N characters (default -1)
+ --discard Discard the output instead of printing
+ --head int Only print the first N characters
+ -h, --help help for cat
+ --offset int Start printing at offset N (or from end if -ve)
+ --separator string Separator to use between objects when printing multiple files
+ --tail int Only print the last N characters
See the global flags page for global options not listed here.
@@ -1966,6 +2000,9 @@ what happened to it. These are reminiscent of diff files.
- ! path means there was an error reading or hashing the source or
dest.
+The default number of parallel checks is 8. See the --checkers=N option
+for more information.
+
rclone checksum sumfile src:path [flags]
Options
@@ -1988,13 +2025,12 @@ SEE ALSO
rclone completion
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
Synopsis
-Generate the autocompletion script for rclone for the specified shell.
-See each sub-command's help for details on how to use the generated
-script.
+Generates a shell completion script for rclone. Run with --help to list
+the supported shells.
Options
@@ -2005,84 +2041,77 @@ See the global flags page for global options not listed here.
SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
-- rclone completion bash - Generate the autocompletion script for bash
-- rclone completion fish - Generate the autocompletion script for fish
-- rclone completion powershell - Generate the autocompletion script
- for powershell
-- rclone completion zsh - Generate the autocompletion script for zsh
+- rclone completion bash - Output bash completion script for rclone.
+- rclone completion fish - Output fish completion script for rclone.
+- rclone completion zsh - Output zsh completion script for rclone.
rclone completion bash
-Generate the autocompletion script for bash
+Output bash completion script for rclone.
Synopsis
-Generate the autocompletion script for the bash shell.
+Generates a bash shell autocompletion script for rclone.
-This script depends on the 'bash-completion' package. If it is not
-installed already, you can install it via your OS's package manager.
+This writes to /etc/bash_completion.d/rclone by default so will probably
+need to be run with sudo or as root, e.g.
-To load completions in your current shell session:
+ sudo rclone completion bash
- source <(rclone completion bash)
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-To load completions for every new session, execute once:
+ . /etc/bash_completion
-Linux:
+If you supply a command line argument the script will be written there.
- rclone completion bash > /etc/bash_completion.d/rclone
+If output_file is "-", then the output will be written to stdout.
-macOS:
-
- rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
-
-You will need to start a new shell for this setup to take effect.
-
- rclone completion bash
+ rclone completion bash [output_file] [flags]
Options
- -h, --help help for bash
- --no-descriptions disable completion descriptions
+ -h, --help help for bash
See the global flags page for global options not listed here.
SEE ALSO
-- rclone completion - Generate the autocompletion script for the
- specified shell
+- rclone completion - Output completion script for a given shell.
rclone completion fish
-Generate the autocompletion script for fish
+Output fish completion script for rclone.
Synopsis
-Generate the autocompletion script for the fish shell.
+Generates a fish autocompletion script for rclone.
-To load completions in your current shell session:
+This writes to /etc/fish/completions/rclone.fish by default so will
+probably need to be run with sudo or as root, e.g.
- rclone completion fish | source
+ sudo rclone completion fish
-To load completions for every new session, execute once:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
- rclone completion fish > ~/.config/fish/completions/rclone.fish
+ . /etc/fish/completions/rclone.fish
-You will need to start a new shell for this setup to take effect.
+If you supply a command line argument the script will be written there.
- rclone completion fish [flags]
+If output_file is "-", then the output will be written to stdout.
+
+ rclone completion fish [output_file] [flags]
Options
- -h, --help help for fish
- --no-descriptions disable completion descriptions
+ -h, --help help for fish
See the global flags page for global options not listed here.
SEE ALSO
-- rclone completion - Generate the autocompletion script for the
- specified shell
+- rclone completion - Output completion script for a given shell.
rclone completion powershell
@@ -2115,46 +2144,37 @@ SEE ALSO
rclone completion zsh
-Generate the autocompletion script for zsh
+Output zsh completion script for rclone.
Synopsis
-Generate the autocompletion script for the zsh shell.
+Generates a zsh autocompletion script for rclone.
-If shell completion is not already enabled in your environment you will
-need to enable it. You can execute the following once:
+This writes to /usr/share/zsh/vendor-completions/_rclone by default so
+will probably need to be run with sudo or as root, e.g.
- echo "autoload -U compinit; compinit" >> ~/.zshrc
+ sudo rclone completion zsh
-To load completions in your current shell session:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
- source <(rclone completion zsh); compdef _rclone rclone
+ autoload -U compinit && compinit
-To load completions for every new session, execute once:
+If you supply a command line argument the script will be written there.
-Linux:
+If output_file is "-", then the output will be written to stdout.
- rclone completion zsh > "${fpath[1]}/_rclone"
-
-macOS:
-
- rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
-
-You will need to start a new shell for this setup to take effect.
-
- rclone completion zsh [flags]
+ rclone completion zsh [output_file] [flags]
Options
- -h, --help help for zsh
- --no-descriptions disable completion descriptions
+ -h, --help help for zsh
See the global flags page for global options not listed here.
SEE ALSO
-- rclone completion - Generate the autocompletion script for the
- specified shell
+- rclone completion - Output completion script for a given shell.
rclone config create
@@ -2727,13 +2747,13 @@ SEE ALSO
rclone cryptcheck
-Cryptcheck checks the integrity of a crypted remote.
+Cryptcheck checks the integrity of an encrypted remote.
Synopsis
rclone cryptcheck checks a remote against a crypted remote. This is the
equivalent of running rclone check, but able to check the checksums of
-the crypted remote.
+the encrypted remote.
For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
@@ -2780,6 +2800,9 @@ what happened to it. These are reminiscent of diff files.
- ! path means there was an error reading or hashing the source or
dest.
+The default number of parallel checks is 8. See the --checkers=N option
+for more information.
+
rclone cryptcheck remote:path cryptedremote:path [flags]
Options
@@ -3108,7 +3131,8 @@ SEE ALSO
rclone listremotes
-List all the remotes in the config file.
+List all the remotes in the config file and defined in environment
+variables.
Synopsis
@@ -3666,6 +3690,17 @@ or FUSE-T. macFUSE is a traditional FUSE driver utilizing a macOS kernel
extension (kext). FUSE-T is an alternative FUSE system which "mounts"
via an NFSv4 local server.
+macFUSE Notes
+
+If installing macFUSE using dmg packages from the website, rclone will
+locate the macFUSE libraries without any further intervention. If,
+however, macFUSE is installed using the macports package manager, the
+following additional steps are required.
+
+ sudo mkdir /usr/local/lib
+ cd /usr/local/lib
+ sudo ln -s /opt/local/lib/libfuse.2.dylib
+
FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These
@@ -3797,19 +3832,18 @@ or create systemd mount units:
# /etc/systemd/system/mnt-data.mount
[Unit]
- After=network-online.target
+ Description=Mount for /mnt/data
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
- Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
optionally accompanied by systemd automount unit
# /etc/systemd/system/mnt-data.automount
[Unit]
- After=network-online.target
- Before=remote-fs.target
+ Description=AutoMount for /mnt/data
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
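+
+After creating or changing these units, reload systemd and start the
+mount (a standard systemd workflow, not specific to rclone):
+
+ sudo systemctl daemon-reload
+ sudo systemctl start mnt-data.mount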
@@ -3920,7 +3954,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -3942,7 +3976,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
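+
+For example (the remote name, mount point and values are
+illustrative), these cache flags might be combined like this:
+
+ rclone mount remote: /mnt/remote --vfs-cache-mode full \
+ --vfs-cache-max-size 10G --vfs-cache-max-age 2h
+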
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -4186,6 +4231,7 @@ Options
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -4197,7 +4243,7 @@ Options
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -4488,10 +4534,11 @@ streaming.
size of the stream is different in length to the --size passed in then
the transfer will likely fail.
-Note that the upload can also not be retried because the data is not
-kept around until the upload succeeds. If you need to transfer a lot of
-data, you're better off caching locally and then rclone move it to the
-destination.
+Note that the upload cannot be retried because the data is not stored.
+If the backend supports multipart uploading then individual chunks can
+be retried. If you need to transfer a lot of data, you may be better
+off caching it locally and then using rclone move to send it to the
+destination, which can use retries.
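+
+For example, rather than streaming directly (generate_data and the
+paths are illustrative placeholders):
+
+ generate_data | rclone rcat remote:path/out.bin
+
+you could cache the data locally first and then move it:
+
+ generate_data > /tmp/out.bin
+ rclone moveto /tmp/out.bin remote:path/out.bin
+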
rclone rcat remote:path [flags]
@@ -4524,12 +4571,12 @@ See the rc documentation for more info on the rc flags.
Server options
-Use --addr to specify which IP address and port the server should listen
-on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By
-default it only listens on localhost. You can use port :0 to let the OS
-choose an available port.
+Use --rc-addr to specify which IP address and port the server should
+listen on, eg --rc-addr 1.2.3.4:8000 or --rc-addr :8080 to listen to all
+IPs. By default it only listens on localhost. You can use port :0 to let
+the OS choose an available port.
-If you set --addr to listen on a public or LAN accessible IP address
+If you set --rc-addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
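+
+For example, to listen on all interfaces on port 8080:
+
+ rclone rcd --rc-addr :8080
+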
You can use a unix socket by setting the url to unix:///path/to/socket
@@ -4537,41 +4584,41 @@ or just by using an absolute path name. Note that unix sockets bypass
the authentication - this is expected to be done with file system
permissions.
---addr may be repeated to listen on multiple IPs/ports/sockets.
+--rc-addr may be repeated to listen on multiple IPs/ports/sockets.
---server-read-timeout and --server-write-timeout can be used to control
-the timeouts on the server. Note that this is the total time for a
-transfer.
+--rc-server-read-timeout and --rc-server-write-timeout can be used to
+control the timeouts on the server. Note that this is the total time for
+a transfer.
---max-header-bytes controls the maximum number of bytes the server will
-accept in the HTTP header.
+--rc-max-header-bytes controls the maximum number of bytes the server
+will accept in the HTTP header.
---baseurl controls the URL prefix that rclone serves from. By default
-rclone will serve from the root. If you used --baseurl "/rclone" then
+--rc-baseurl controls the URL prefix that rclone serves from. By default
+rclone will serve from the root. If you used --rc-baseurl "/rclone" then
rclone would serve from a URL starting with "/rclone/". This is useful
if you wish to proxy rclone serve. Rclone automatically inserts leading
-and trailing "/" on --baseurl, so --baseurl "rclone",
---baseurl "/rclone" and --baseurl "/rclone/" are all treated
+and trailing "/" on --rc-baseurl, so --rc-baseurl "rclone",
+--rc-baseurl "/rclone" and --rc-baseurl "/rclone/" are all treated
identically.
TLS (SSL)
By default this will serve over http. If you want you can serve over
-https. You will need to supply the --cert and --key flags. If you wish
-to do client side certificate validation then you will need to supply
---client-ca also.
+https. You will need to supply the --rc-cert and --rc-key flags. If you
+wish to do client side certificate validation then you will need to
+supply --rc-client-ca also.
---cert should be a either a PEM encoded certificate or a concatenation
-of that with the CA certificate. --key should be the PEM encoded private
-key and --client-ca should be the PEM encoded client certificate
-authority certificate.
+--rc-cert should be either a PEM encoded certificate or a
+concatenation of that with the CA certificate. --rc-key should be the
+PEM encoded private key and --rc-client-ca should be the PEM encoded
+client certificate authority certificate.
---min-tls-version is minimum TLS version that is acceptable. Valid
+--rc-min-tls-version is the minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
Template
---template allows a user to specify a custom markup template for HTTP
+--rc-template allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup to
be used within the template to server pages:
@@ -4626,10 +4673,15 @@ Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set
-a single username and password with the --user and --pass flags.
+a single username and password with the --rc-user and --rc-pass flags.
-Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in
-standard apache format and supports MD5, SHA1 and BCrypt for basic
+If no static users are configured by either of the above methods, and
+client certificates are required by the --rc-client-ca flag passed to the
+server, the client certificate common name will be considered as the
+username.
+
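+For example, to protect the API with a single login (credentials are
+illustrative):
+
+ rclone rcd --rc-user alice --rc-pass mysecret
+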
+Use --rc-htpasswd /path/to/htpasswd to provide an htpasswd file. This is
+in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
To create an htpasswd file:
@@ -4640,9 +4692,9 @@ To create an htpasswd file:
The password file can be updated while rclone is running.
-Use --realm to set the authentication realm.
+Use --rc-realm to set the authentication realm.
-Use --salt to change the password hashing salt from the default.
+Use --rc-salt to change the password hashing salt from the default.
rclone rcd * [flags]
@@ -4897,7 +4949,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -4919,7 +4971,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -5162,7 +5225,7 @@ Options
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -5302,7 +5365,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -5324,7 +5387,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -5570,6 +5644,7 @@ Options
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -5584,7 +5659,7 @@ Options
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -5709,7 +5784,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -5731,7 +5806,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -6048,7 +6134,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -6192,6 +6278,11 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set
a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and
+client certificates are required by the --client-ca flag passed to the
+server, the client certificate common name will be considered as the
+username.
+
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in
standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -6283,7 +6374,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -6305,7 +6396,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -6630,7 +6732,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -6790,6 +6892,11 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set
a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and
+client certificates are required by the --client-ca flag passed to the
+server, the client certificate common name will be considered as the
+username.
+
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in
standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -6890,9 +6997,9 @@ commands to while the servers all have different views of the state of
the filing system.
The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from
-beeing used. Omitting "restrict" and using --sftp-path-override to
-enable checksumming is possible but less secure and you could use the
-SFTP server provided by OpenSSH in this case.
+being used. Omitting "restrict" and using --sftp-path-override to enable
+checksumming is possible but less secure and you could use the SFTP
+server provided by OpenSSH in this case.
VFS - Virtual File System
@@ -6969,7 +7076,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -6991,7 +7098,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -7308,7 +7426,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7482,6 +7600,11 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or set
a single username and password with the --user and --pass flags.
+If no static users are configured by either of the above methods, and
+client certificates are required by the --client-ca flag passed to the
+server, the client certificate common name will be considered as the
+username.
+
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in
standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -7573,7 +7696,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -7595,7 +7718,18 @@ be uploaded next time rclone is run with the same flags.
If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be evicted
-from the cache.
+from the cache. When --vfs-cache-max-size is exceeded, rclone will
+evict the least recently accessed files from the cache first, starting
+with those that have gone unused for the longest. This cache flushing
+strategy is efficient, and the more relevant files are likely to
+remain cached.
+
+The --vfs-cache-max-age flag will evict files from the cache after the
+set time since last access has passed. With the default value of 1
+hour, files that haven't been accessed for 1 hour are evicted. When a
+cached file is accessed, the timer is reset to 0 and another hour must
+pass before the file is evicted. Specify the time with standard
+notation: s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with
the same or overlapping remotes if using --vfs-cache-mode > off. This
@@ -7922,7 +8056,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7992,7 +8126,7 @@ Synopsis
Rclone test is used to run test commands.
-Select which test comand you want with the subcommand, eg
+Select which test command you want with the subcommand, eg
rclone test memory remote:
@@ -8974,7 +9108,7 @@ are incorrect as it would normally.
--color WHEN
-Specifiy when colors (and other ANSI codes) should be added to the
+Specify when colors (and other ANSI codes) should be added to the
output.
AUTO (default) only allows ANSI codes when the output is a terminal
@@ -9084,6 +9218,22 @@ You may also choose to encrypt the file.
When token-based authentication are used, the configuration file must be
writable, because rclone needs to update the tokens inside it.
+To reduce the risk of corrupting an existing configuration file,
+rclone will not write directly to it when saving changes. Instead it
+will first write to a new, temporary file. If a configuration file
+already existed, it will (on Unix systems) try to mirror its
+permissions to the new file. Then it will rename the existing file to
+a temporary name as a backup. Next, rclone will rename the new file to
+the correct name, before finally cleaning up by deleting the backup
+file.
+
+If the configuration file path used by rclone is a symbolic link, then
+this will be evaluated and rclone will write to the resolved path,
+instead of overwriting the symbolic link. Temporary files used in the
+process (described above) will be written to the same parent directory
+as that of the resolved configuration file, but if this directory is
+also a symbolic link it will not be resolved and the temporary files
+will be written to the location of the directory symbolic link.
+
--contimeout=TIME
Set the connection timeout. This should be in go time format which looks
@@ -9113,6 +9263,18 @@ oldest, rename. The default is interactive.
See the dedupe command for more information as to what these options
mean.
+--default-time TIME
+
+If a file or directory does not have a modification time rclone can
+read then rclone will display this fixed time instead.
+
+The default is 2000-01-01 00:00:00 UTC. This can be configured in any of
+the ways shown in the time or duration options.
+
+For example --default-time 2020-06-01 to set the default time to the 1st
+of June 2020 or --default-time 0s to set the default time to the time
+rclone started up.
+
--disable FEATURE,FEATURE,...
This disables a comma separated list of optional features. For example
@@ -9126,9 +9288,23 @@ To see a list of which features can be disabled use:
--disable help
+The features a remote has can be seen in JSON format with:
+
+ rclone backend features remote:
+
See the overview features and optional features to get an idea of which
feature does what.
+Note that some true/false feature flags can be forced on or off by
+prefixing them with !. For example the CaseInsensitive feature can be
+forced to false with --disable CaseInsensitive and forced to true with
+--disable '!CaseInsensitive'. In general doing this isn't a good idea,
+but it may be useful in extremis.
+
+(Note that ! is special to most shells, so you will need to escape it
+with single quotes or a backslash on unix-like platforms.)
+
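+For example, to disable server-side move and server-side copy (the
+remote paths are illustrative):
+
+ rclone copy --disable move,copy remote:src remote:dst
+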
This flag can be useful for debugging and in exceptional circumstances
(e.g. Google Drive limiting the total volume of Server Side Copies to
100 GiB/day).
@@ -9348,6 +9524,49 @@ This can be useful as an additional layer of protection for immutable or
append-only data sets (notably backup archives), where modification
implies corruption and should not be propagated.
+--inplace
+
+The --inplace flag changes the behaviour of rclone when uploading files
+to some backends (backends with the PartialUploads feature flag set)
+such as:
+
+- local
+- ftp
+- sftp
+
+Without --inplace (the default) rclone will first upload to a
+temporary file with an extension like the following, where XXXXXX
+represents a random string.
+
+ original-file-name.XXXXXX.partial
+
+(rclone will make sure the final name is no longer than 100 characters
+by truncating the original-file-name part if necessary).
+
+When the upload is complete, rclone will rename the .partial file to the
+correct name, overwriting any existing file at that point. If the upload
+fails then the .partial file will be deleted.
+
+This prevents other users of the backend from seeing partially uploaded
+files in their new names and prevents overwriting the old file until the
+new one is completely uploaded.
+
+If the --inplace flag is supplied, rclone will upload directly to the
+final name without creating a .partial file.
+
+This means that an incomplete file will be visible in the directory
+listings while the upload is in progress and any existing files will
+be overwritten as soon as the upload starts. If the transfer fails
+then the file will be deleted, which can cause data loss of the
+existing file.
+
+Note that on the local file system, hard links (Unix only) will be
+broken if you don't use --inplace. And if you do use --inplace you
+won't be able to update in-use executables.
+
+Note also that versions of rclone prior to v1.63.0 behave as if the
+--inplace flag is always supplied.
+
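+For example, to upload directly to the final name (the remote and
+paths are illustrative):
+
+ rclone copy --inplace /path/to/file remote:dir
+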
-i, --interactive
This flag can be used to tell rclone that you wish a manual confirmation
@@ -9552,6 +9771,25 @@ reading and writing to an OS X filing system this will be 1s by default.
This command line flag allows you to override that computed default.
+--multi-thread-write-buffer-size=SIZE
+
+When downloading with multiple threads, rclone will buffer SIZE bytes in
+memory before writing to disk for each thread.
+
+This can improve performance if the underlying filesystem does not deal
+well with a lot of small writes in different positions of the file, so
+if you see downloads being limited by disk write speed, you might want
+to experiment with different values. Especially for magnetic drives and
+remote file systems a higher value can be useful.
+
+Nevertheless, the default of 128k should be fine for almost all use
+cases, so before changing it ensure that the network is not really your
+bottleneck.
+
+As a final hint, size is not the only factor: block size (or similar
+concept) can have an impact. In one case, we observed that exact
+multiples of 16k performed much better than other values.
+
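+For example, to experiment with a larger write buffer when downloading
+to the local disk (the value and paths are illustrative):
+
+ rclone copy --multi-thread-write-buffer-size 512k remote:path /local/path
+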
--multi-thread-cutoff=SIZE
When downloading files to the local backend above this size, rclone will
@@ -9949,6 +10187,12 @@ would be backed up to file.txt-2019-01-01 and with the flag it would be
backed up to file-2019-01-01.txt. This can be helpful to make sure the
suffixed files can still be opened.
+If a file has two (or more) extensions and the second (or subsequent)
+extension is recognised as a valid mime type, then the suffix will go
+before that extension. So file.tar.gz would be backed up to
+file-2019-01-01.tar.gz whereas file.badextension.gz would be backed up
+to file.badextension-2019-01-01.gz.
+
--syslog
On capable OSes (not Windows or Plan9) send all log output to syslog.
@@ -10950,7 +11194,7 @@ Which will match a directory called start with a file called end.jpg in
it as the .* will match / characters.
Note that you can use -vv --dump filters to show the filter patterns in
-regexp format - rclone implements the glob patters by transforming them
+regexp format - rclone implements the glob patterns by transforming them
into regular expressions.
Filter pattern examples
@@ -12249,7 +12493,7 @@ See the config dump command for more information on the above.
Authentication is required for this call.
-config/listremotes: Lists the remotes in the config file.
+config/listremotes: Lists the remotes in the config file and defined in environment variables.
Returns - remotes - array of remote names
@@ -12834,9 +13078,11 @@ operations/copyfile: Copy a file from source remote to destination remote
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for
+ local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/"
+ for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the
destination
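+
+For example, this might be invoked via the rc command (remote names
+and paths reuse the illustrative values above):
+
+ rclone rc operations/copyfile srcFs=drive: srcRemote=file.txt dstFs=drive2: dstRemote=file2.txt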
@@ -13033,9 +13279,11 @@ operations/movefile: Move a file from source remote to destination remote
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for
+ local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/"
+ for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the
destination
@@ -13735,6 +13983,7 @@ Here is an overview of the major features of each cloud storage system.
OpenStack Swift MD5 R/W No No R/W -
Oracle Object Storage MD5 R/W No No R/W -
pCloud MD5, SHA1 ⁷ R No No W -
+ PikPak MD5 R No No R -
premiumize.me - - Yes No R -
put.io CRC-32 R/W No Yes R -
QingStor MD5 - ⁹ No No R/W -
@@ -13758,9 +14007,11 @@ Notes
² SFTP supports checksums if the same login has shell access and md5sum
or sha1sum as well as echo are in the remote's PATH.
-³ WebDAV supports hashes when used with Owncloud and Nextcloud only.
+³ WebDAV supports hashes when used with Fastmail Files, Owncloud and
+Nextcloud only.
-⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
+⁴ WebDAV supports modtimes when used with Fastmail Files, Owncloud and
+Nextcloud only.
⁵ QuickXorHash is Microsoft's own hash.
@@ -14194,7 +14445,7 @@ upon backend-specific capabilities.
Amazon S3 (or S3 compatible) No Yes No No Yes Yes Yes Yes No No
Backblaze B2 No Yes No No Yes Yes Yes Yes No No
Box Yes Yes Yes Yes Yes ‡‡ No Yes Yes Yes Yes
- Citrix ShareFile Yes Yes Yes Yes No No Yes No No Yes
+ Citrix ShareFile Yes Yes Yes Yes No No No No No Yes
Dropbox Yes Yes Yes Yes No No Yes Yes Yes Yes
Enterprise File Fabric Yes Yes Yes Yes Yes No No No No Yes
FTP No No Yes Yes No No Yes No No Yes
@@ -14216,6 +14467,7 @@ upon backend-specific capabilities.
OpenStack Swift Yes † Yes No No No Yes Yes No Yes No
Oracle Object Storage No Yes No No Yes Yes Yes No No No
pCloud Yes Yes Yes Yes Yes No No Yes Yes Yes
+ PikPak Yes Yes Yes Yes Yes No No Yes Yes Yes
premiumize.me Yes No Yes Yes No No No Yes Yes Yes
put.io Yes No Yes Yes Yes No Yes No Yes Yes
QingStor No Yes No No Yes Yes No No No No
@@ -14325,720 +14577,750 @@ Non Backend Flags
These flags are available for every command.
- --ask-password Allow prompt for password for encrypted configuration (default true)
- --auto-confirm If enabled, do not request console confirmation
- --backup-dir string Make backups into hierarchy based in DIR
- --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
- --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
- --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --ca-cert stringArray CA certificate used to verify servers
- --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
- --check-first Do all the checks before starting transfers
- --checkers int Number of checkers to run in parallel (default 8)
- -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
- --client-cert string Client SSL certificate (PEM) for mutual TLS auth
- --client-key string Client SSL private key (PEM) for mutual TLS auth
- --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
- --config string Config file (default "$HOME/.config/rclone/rclone.conf")
- --contimeout Duration Connect timeout (default 1m0s)
- --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
- --cpuprofile string Write cpu profile to file
- --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
- --delete-after When synchronizing, delete files on destination after transferring (default)
- --delete-before When synchronizing, delete files on destination before transferring
- --delete-during When synchronizing, delete files during transfer
- --delete-excluded Delete files on dest excluded from sync
- --disable string Disable a comma separated list of features (use --disable help to see a list)
- --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
- --disable-http2 Disable HTTP/2 in the global transport
- -n, --dry-run Do a trial run with no permanent changes
- --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
- --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
- --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
- --dump-headers Dump HTTP headers - may contain sensitive info
- --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
- --fast-list Use recursive list if available; uses more memory but fewer transactions
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
- --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
- --header stringArray Set HTTP header for all transactions
- --header-download stringArray Set HTTP header for download transactions
- --header-upload stringArray Set HTTP header for upload transactions
- --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
- --ignore-case Ignore case in filters (case insensitive)
- --ignore-case-sync Ignore case when synchronizing
- --ignore-checksum Skip post copy check of checksums
- --ignore-errors Delete even if there are I/O errors
- --ignore-existing Skip all files that exist on destination
- --ignore-size Ignore size when skipping use mod-time or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
- --immutable Do not modify files, fail if existing files have been modified
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- -i, --interactive Enable interactive mode
- --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
- --log-file string Log everything to this file
- --log-format string Comma separated list of log format options (default "date,time")
- --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
- --log-systemd Activate systemd integration for the logger
- --low-level-retries int Number of low level retries to do (default 10)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
- --max-delete int When synchronizing, limit the number of deletes (default -1)
- --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --max-stats-groups int Maximum number of stats groups to keep in memory; when the maximum is reached, the oldest is discarded (default 1000)
- --max-transfer SizeSuffix Maximum size of data to transfer (default off)
- --memprofile string Write memory profile to file
- -M, --metadata If set, preserve metadata when copying objects
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --metadata-set stringArray Add metadata key=value when uploading
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
- --modify-window Duration Max time diff to be considered the same (default 1ns)
- --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
- --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
- --no-check-certificate Do not verify the server SSL certificate (insecure)
- --no-check-dest Don't check the destination, copy regardless
- --no-console Hide console window (supported on Windows only)
- --no-gzip-encoding Don't set Accept-Encoding: gzip
- --no-traverse Don't traverse destination file system on copy
- --no-unicode-normalization Don't normalize unicode characters in filenames
- --no-update-modtime Don't update destination mod-time if files identical
- --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
- --password-command SpaceSepList Command for supplying password for encrypted configuration
- -P, --progress Show progress during transfer
- --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
- -q, --quiet Print as little stuff as possible
- --rc Enable the remote control server
- --rc-addr stringArray IP address:Port or :Port to bind server to (default [localhost:5572])
- --rc-allow-origin string Set the allowed origin for CORS
- --rc-baseurl string Prefix for URLs - leave blank for root
- --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
- --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
- --rc-files string Path to local files to serve on the HTTP server
- --rc-htpasswd string An htpasswd file - if not provided no authentication is done
- --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
- --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
- --rc-key string TLS PEM private key
- --rc-max-header-bytes int Maximum size of request header (default 4096)
- --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --rc-no-auth Don't require auth for certain methods
- --rc-pass string Password for authentication
- --rc-realm string Realm for authentication
- --rc-salt string Password hashing salt (default "dlPL2MqE")
- --rc-serve Enable the serving of remote objects
- --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --rc-template string User-specified template
- --rc-user string User name for authentication
- --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
- --rc-web-gui Launch WebGUI on localhost
- --rc-web-gui-force-update Force update to latest version of web gui
- --rc-web-gui-no-open-browser Don't open the browser automatically
- --rc-web-gui-update Check and update to latest version of web gui
- --refresh-times Refresh the modtime of remote files
- --retries int Retry operations this many times if they fail (default 3)
- --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
- --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
- --size-only Skip based on size only, not mod-time or checksum
- --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
- --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
- --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
- --stats-one-line Make the stats fit on one line
- --stats-one-line-date Enable --stats-one-line and add current date/time prefix
- --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
- --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
- --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
- --suffix string Suffix to add to changed files
- --suffix-keep-extension Preserve the extension when using --suffix
- --syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
- --temp-dir string Directory rclone will use for temporary files (default "/tmp")
- --timeout Duration IO idle timeout (default 5m0s)
- --tpslimit float Limit HTTP transactions per second to this
- --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
- --track-renames When synchronizing, track file renames and do a server-side move if possible
- --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
- --transfers int Number of file transfers to run in parallel (default 4)
- -u, --update Skip files that are newer on the destination
- --use-cookies Enable session cookiejar
- --use-json-log Use json log format
- --use-mmap Use mmap allocator (see docs)
- --use-server-modtime Use server modified time instead of object metadata
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.62.0")
- -v, --verbose count Print lots more stuff (repeat for more)
+ --ask-password Allow prompt for password for encrypted configuration (default true)
+ --auto-confirm If enabled, do not request console confirmation
+ --backup-dir string Make backups into hierarchy based in DIR
+ --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
+ --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
+ --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --ca-cert stringArray CA certificate used to verify servers
+ --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
+ --check-first Do all the checks before starting transfers
+ --checkers int Number of checkers to run in parallel (default 8)
+ -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
+ --client-cert string Client SSL certificate (PEM) for mutual TLS auth
+ --client-key string Client SSL private key (PEM) for mutual TLS auth
+ --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
+ --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ --config string Config file (default "$HOME/.config/rclone/rclone.conf")
+ --contimeout Duration Connect timeout (default 1m0s)
+ --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
+ --cpuprofile string Write cpu profile to file
+ --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
+ --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
+ --delete-after When synchronizing, delete files on destination after transferring (default)
+ --delete-before When synchronizing, delete files on destination before transferring
+ --delete-during When synchronizing, delete files during transfer
+ --delete-excluded Delete files on dest excluded from sync
+ --disable string Disable a comma separated list of features (use --disable help to see a list)
+ --disable-http-keep-alives Disable HTTP keep-alives and use each connection once
+ --disable-http2 Disable HTTP/2 in the global transport
+ -n, --dry-run Do a trial run with no permanent changes
+ --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
+ --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
+ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
+ --dump-headers Dump HTTP headers - may contain sensitive info
+ --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
+ --fast-list Use recursive list if available; uses more memory but fewer transactions
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
+ --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
+ --header stringArray Set HTTP header for all transactions
+ --header-download stringArray Set HTTP header for download transactions
+ --header-upload stringArray Set HTTP header for upload transactions
+ --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
+ --ignore-case Ignore case in filters (case insensitive)
+ --ignore-case-sync Ignore case when synchronizing
+ --ignore-checksum Skip post copy check of checksums
+ --ignore-errors Delete even if there are I/O errors
+ --ignore-existing Skip all files that exist on destination
+ --ignore-size Ignore size when skipping; use mod-time or checksum
+ -I, --ignore-times Don't skip files that match size and time - transfer all files
+ --immutable Do not modify files, fail if existing files have been modified
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --inplace Download directly to destination file instead of atomic download to temp/rename
+ -i, --interactive Enable interactive mode
+ --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
+ --log-file string Log everything to this file
+ --log-format string Comma separated list of log format options (default "date,time")
+ --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
+ --log-systemd Activate systemd integration for the logger
+ --low-level-retries int Number of low level retries to do (default 10)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
+ --max-delete int When synchronizing, limit the number of deletes (default -1)
+ --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --max-stats-groups int Maximum number of stats groups to keep in memory; when the maximum is reached, the oldest is discarded (default 1000)
+ --max-transfer SizeSuffix Maximum size of data to transfer (default off)
+ --memprofile string Write memory profile to file
+ -M, --metadata If set, preserve metadata when copying objects
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --metadata-set stringArray Add metadata key=value when uploading
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+ --modify-window Duration Max time diff to be considered the same (default 1ns)
+ --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
+ --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
+ --multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
+ --no-check-certificate Do not verify the server SSL certificate (insecure)
+ --no-check-dest Don't check the destination, copy regardless
+ --no-console Hide console window (supported on Windows only)
+ --no-gzip-encoding Don't set Accept-Encoding: gzip
+ --no-traverse Don't traverse destination file system on copy
+ --no-unicode-normalization Don't normalize unicode characters in filenames
+ --no-update-modtime Don't update destination mod-time if files identical
+ --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
+ --password-command SpaceSepList Command for supplying password for encrypted configuration
+ -P, --progress Show progress during transfer
+ --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
+ -q, --quiet Print as little stuff as possible
+ --rc Enable the remote control server
+ --rc-addr stringArray IP address:Port or :Port to bind server to (default [localhost:5572])
+ --rc-allow-origin string Set the allowed origin for CORS
+ --rc-baseurl string Prefix for URLs - leave blank for root
+ --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --rc-client-ca string Client certificate authority to verify clients with
+ --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-files string Path to local files to serve on the HTTP server
+ --rc-htpasswd string An htpasswd file - if not provided no authentication is done
+ --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
+ --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
+ --rc-key string TLS PEM private key
+ --rc-max-header-bytes int Maximum size of request header (default 4096)
+ --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --rc-no-auth Don't require auth for certain methods
+ --rc-pass string Password for authentication
+ --rc-realm string Realm for authentication
+ --rc-salt string Password hashing salt (default "dlPL2MqE")
+ --rc-serve Enable the serving of remote objects
+ --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --rc-template string User-specified template
+ --rc-user string User name for authentication
+ --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
+ --rc-web-gui Launch WebGUI on localhost
+ --rc-web-gui-force-update Force update to latest version of web gui
+ --rc-web-gui-no-open-browser Don't open the browser automatically
+ --rc-web-gui-update Check and update to latest version of web gui
+ --refresh-times Refresh the modtime of remote files
+ --retries int Retry operations this many times if they fail (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
+ --size-only Skip based on size only, not mod-time or checksum
+ --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
+ --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
+ --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
+ --stats-one-line Make the stats fit on one line
+ --stats-one-line-date Enable --stats-one-line and add current date/time prefix
+ --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
+ --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
+ --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+ --suffix string Suffix to add to changed files
+ --suffix-keep-extension Preserve the extension when using --suffix
+ --syslog Use Syslog for logging
+ --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
+ --temp-dir string Directory rclone will use for temporary files (default "/tmp")
+ --timeout Duration IO idle timeout (default 5m0s)
+ --tpslimit float Limit HTTP transactions per second to this
+ --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
+ --track-renames When synchronizing, track file renames and do a server-side move if possible
+ --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
+ --transfers int Number of file transfers to run in parallel (default 4)
+ -u, --update Skip files that are newer on the destination
+ --use-cookies Enable session cookiejar
+ --use-json-log Use json log format
+ --use-mmap Use mmap allocator (see docs)
+ --use-server-modtime Use server modified time instead of object metadata
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
+ -v, --verbose count Print lots more stuff (repeat for more)
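
For orientation, a minimal sketch of how a few of these global flags are typically combined on the command line (the remote name remote: and the paths are placeholders):

    # sync with 8 parallel transfers, a 10 MiB/s bandwidth cap and progress output
    rclone sync /home/user/docs remote:docs --transfers 8 --bwlimit 10M -P

    # global flags can normally also be supplied as environment variables:
    # RCLONE_ plus the flag name upper-cased, with dashes as underscores
    RCLONE_TRANSFERS=8 rclone copy /home/user/pics remote:pics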
Backend Flags
These flags are available for every command. They control the backends
and may be set in the config file.
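As a sketch of the equivalent ways a single backend option can be supplied (the remote name mydrive and the paths are hypothetical):

    # on the command line
    rclone copy /data mydrive:backup --drive-chunk-size 64M

    # in the config file, using the option name without the backend prefix
    [mydrive]
    type = drive
    chunk_size = 64M

    # as a per-remote environment variable
    RCLONE_CONFIG_MYDRIVE_CHUNK_SIZE=64M rclone copy /data mydrive:backup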
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
- --alias-remote string Remote or path to alias
- --azureblob-access-tier string Access tier of blob: hot, cool or archive
- --azureblob-account string Azure Storage Account Name
- --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
- --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
- --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
- --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
- --azureblob-client-id string The ID of the client in use
- --azureblob-client-secret string One of the service principal's client secrets
- --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
- --azureblob-disable-checksum Don't store MD5 checksum with object metadata
- --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
- --azureblob-endpoint string Endpoint for the service
- --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
- --azureblob-key string Storage Account Shared Key
- --azureblob-list-chunk int Size of blob list (default 5000)
- --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
- --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-no-check-container If set, don't attempt to check the container exists or create it
- --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
- --azureblob-password string The user's password (obscured)
- --azureblob-public-access string Public access level of a container: blob or container
- --azureblob-sas-url string SAS URL for container level access only
- --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
- --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
- --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
- --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
- --azureblob-use-emulator Use the local storage emulator if set to 'true'
- --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
- --azureblob-username string User name (usually an email address)
- --b2-account string Account ID or Application Key ID
- --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
- --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
- --b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
- --b2-download-url string Custom endpoint for downloads
- --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --b2-endpoint string Endpoint for the service
- --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
- --b2-key string Application Key
- --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
- --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --b2-version-at Time Show file versions as they were at the specified time (default off)
- --b2-versions Include old versions in directory listings
- --box-access-token string Box App Primary Access Token
- --box-auth-url string Auth server URL
- --box-box-config-file string Box App config.json location
- --box-box-sub-type string (default "user")
- --box-client-id string OAuth Client Id
- --box-client-secret string OAuth Client Secret
- --box-commit-retries int Max number of times to try committing a multipart file (default 100)
- --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
- --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
- --box-owned-by string Only show items owned by the login (email address) passed in
- --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
- --box-token string OAuth Access Token as a JSON blob
- --box-token-url string Token server url
- --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
- --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
- --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
- --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
- --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
- --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
- --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
- --cache-db-purge Clear all the cached data for this remote on start
- --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
- --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
- --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
- --cache-plex-password string The password of the Plex user (obscured)
- --cache-plex-url string The URL of the Plex server
- --cache-plex-username string The username of the Plex user
- --cache-read-retries int How many times to retry a read from a cache storage (default 10)
- --cache-remote string Remote to cache
- --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
- --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
- --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
- --cache-workers int How many workers should run in parallel to download chunks (default 4)
- --cache-writes Cache file data on writes through the FS
- --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
- --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
- --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
- --chunker-remote string Remote to chunk/unchunk
- --combine-upstreams SpaceSepList Upstreams for combining
- --compress-level int GZIP compression level (-2 to 9) (default -1)
- --compress-mode string Compression mode (default "gzip")
- --compress-ram-cache-limit SizeSuffix Files of unknown size smaller than this are cached in RAM to determine their compressed size (default 20Mi)
- --compress-remote string Remote to compress
- -L, --copy-links Follow symlinks and copy the pointed to item
- --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
- --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
- --crypt-filename-encryption string How to encrypt the filenames (default "standard")
- --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
- --crypt-password string Password or pass phrase for encryption (obscured)
- --crypt-password2 string Password or pass phrase for salt (obscured)
- --crypt-remote string Remote to encrypt/decrypt
- --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
- --crypt-show-mapping For all files listed show how the names encrypt
- --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
- --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
- --drive-auth-owner-only Only consider files owned by the authenticated user
- --drive-auth-url string Auth server URL
- --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
- --drive-client-id string Google Application Client Id
- --drive-client-secret string OAuth Client Secret
- --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
- --drive-disable-http2 Disable drive using http2 (default true)
- --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
- --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
- --drive-formats string Deprecated: See export_formats
- --drive-impersonate string Impersonate this user when using a service account
- --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
- --drive-keep-revision-forever Keep new head revision of each file forever
- --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
- --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
- --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
- --drive-resource-key string Resource key for accessing a link-shared file
- --drive-root-folder-id string ID of the root folder
- --drive-scope string Scope that rclone should use when requesting access from drive
- --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
- --drive-service-account-credentials string Service Account Credentials JSON blob
- --drive-service-account-file string Service Account Credentials JSON file path
- --drive-shared-with-me Only show files that are shared with me
- --drive-size-as-quota Show sizes as storage quota usage, not actual size
- --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
- --drive-skip-dangling-shortcuts If set skip dangling shortcut files
- --drive-skip-gdocs Skip google documents in all listings
- --drive-skip-shortcuts If set skip shortcut files
- --drive-starred-only Only show files that are starred
- --drive-stop-on-download-limit Make download limit errors be fatal
- --drive-stop-on-upload-limit Make upload limit errors be fatal
- --drive-team-drive string ID of the Shared Drive (Team Drive)
- --drive-token string OAuth Access Token as a JSON blob
- --drive-token-url string Token server url
- --drive-trashed-only Only show files that are in the trash
- --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
- --drive-use-created-date Use file created date instead of modified date
- --drive-use-shared-date Use date file was shared instead of modified date
- --drive-use-trash Send files to the trash instead of deleting permanently (default true)
- --drive-v2-download-min-size SizeSuffix If objects are greater than this, use the drive v2 API to download (default off)
- --dropbox-auth-url string Auth server URL
- --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
- --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
- --dropbox-batch-size int Max number of files in upload batch
- --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
- --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
- --dropbox-client-id string OAuth Client Id
- --dropbox-client-secret string OAuth Client Secret
- --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
- --dropbox-impersonate string Impersonate this user when using a business account
- --dropbox-shared-files Instructs rclone to work on individual shared files
- --dropbox-shared-folders Instructs rclone to work on shared folders
- --dropbox-token string OAuth Access Token as a JSON blob
- --dropbox-token-url string Token server url
- --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
- --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
- --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
- --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
- --fichier-shared-folder string If you want to download a shared folder, add this parameter
- --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --filefabric-permanent-token string Permanent Authentication Token
- --filefabric-root-folder-id string ID of the root folder
- --filefabric-token string Session Token
- --filefabric-token-expiry string Token expiry time
- --filefabric-url string URL of the Enterprise File Fabric to connect to
- --filefabric-version string Version read from the file fabric
- --ftp-ask-password Allow asking for FTP password when needed
- --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
- --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
- --ftp-disable-epsv Disable using EPSV even if server advertises support
- --ftp-disable-mlsd Disable using MLSD even if server advertises support
- --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
- --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
- --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
- --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
- --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
- --ftp-host string FTP host to connect to
- --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --ftp-no-check-certificate Do not verify the TLS certificate of the server
- --ftp-pass string FTP password (obscured)
- --ftp-port int FTP port number (default 21)
- --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
- --ftp-tls Use Implicit FTPS (FTP over TLS)
- --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
- --ftp-user string FTP username (default "$USER")
- --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
- --gcs-anonymous Access public buckets and objects without credentials
- --gcs-auth-url string Auth server URL
- --gcs-bucket-acl string Access Control List for new buckets
- --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
- --gcs-client-id string OAuth Client Id
- --gcs-client-secret string OAuth Client Secret
- --gcs-decompress If set this will decompress gzip encoded objects
- --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gcs-endpoint string Endpoint for the service
- --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
- --gcs-location string Location for the newly created buckets
- --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --gcs-object-acl string Access Control List for new objects
- --gcs-project-number string Project number
- --gcs-service-account-file string Service Account Credentials JSON file path
- --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
- --gcs-token string OAuth Access Token as a JSON blob
- --gcs-token-url string Token server url
- --gphotos-auth-url string Auth server URL
- --gphotos-client-id string OAuth Client Id
- --gphotos-client-secret string OAuth Client Secret
- --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gphotos-include-archived Also view and download archived media
- --gphotos-read-only Set to make the Google Photos backend read only
- --gphotos-read-size Set to read the size of media items
- --gphotos-start-year int Only download photos uploaded after the given year (default 2000)
- --gphotos-token string OAuth Access Token as a JSON blob
- --gphotos-token-url string Token server url
- --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
- --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
- --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
- --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
- --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
- --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
- --hdfs-namenode string Hadoop name node and port
- --hdfs-service-principal-name string Kerberos service principal name for the namenode
- --hdfs-username string Hadoop user name
- --hidrive-auth-url string Auth server URL
- --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
- --hidrive-client-id string OAuth Client Id
- --hidrive-client-secret string OAuth Client Secret
- --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
- --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
- --hidrive-root-prefix string The root/parent folder for all paths (default "/")
- --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
- --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
- --hidrive-token string OAuth Access Token as a JSON blob
- --hidrive-token-url string Token server url
- --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
- --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
- --http-headers CommaSepList Set HTTP headers for all transactions
- --http-no-head Don't use HEAD requests
- --http-no-slash Set this if the site doesn't end directories with /
- --http-url string URL of HTTP host to connect to
- --internetarchive-access-key-id string IAS3 Access Key
- --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
- --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
- --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
- --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
- --internetarchive-secret-access-key string IAS3 Secret Key (password)
- --internetarchive-wait-archive Duration Timeout when waiting for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
- --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
- --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
- --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
- --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
- --jottacloud-trashed-only Only show files that are in the trash
- --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
- --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --koofr-endpoint string The Koofr API endpoint to use
- --koofr-mountid string Mount ID of the mount to use
- --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
- --koofr-provider string Choose your storage provider
- --koofr-setmtime Whether the backend supports setting modification time (default true)
- --koofr-user string Your user name
- -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
- --local-case-insensitive Force the filesystem to report itself as case insensitive
- --local-case-sensitive Force the filesystem to report itself as case sensitive
- --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --local-no-check-updated Don't check to see if the files change during upload
- --local-no-preallocate Disable preallocation of disk space for transferred files
- --local-no-set-modtime Disable setting modtime
- --local-no-sparse Disable sparse files for multi-thread downloads
- --local-nounc Disable UNC (long path names) conversion on Windows
- --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
- --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
- --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
- --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --mailru-pass string Password (obscured)
- --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
- --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
- --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
- --mailru-speedup-max-memory SizeSuffix Files larger than this size will always be hashed on disk (default 32Mi)
- --mailru-user string User name (usually email)
- --mega-debug Output more debug from Mega
- --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --mega-hard-delete Delete files permanently rather than putting them into the trash
- --mega-pass string Password (obscured)
- --mega-use-https Use HTTPS for transfers
- --mega-user string User name
- --netstorage-account string Set the NetStorage account name
- --netstorage-host string Domain+path of NetStorage host to connect to
- --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
- --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
- -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
- --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
- --onedrive-auth-url string Auth server URL
- --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
- --onedrive-client-id string OAuth Client Id
- --onedrive-client-secret string OAuth Client Secret
- --onedrive-drive-id string The ID of the drive to use
- --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
- --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
- --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
- --onedrive-link-password string Set the password for links created by the link command
- --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
- --onedrive-link-type string Set the type of the links created by the link command (default "view")
- --onedrive-list-chunk int Size of listing chunk (default 1000)
- --onedrive-no-versions Remove all versions on modifying operations
- --onedrive-region string Choose national cloud region for OneDrive (default "global")
- --onedrive-root-folder-id string ID of the root folder
- --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
- --onedrive-token string OAuth Access Token as a JSON blob
- --onedrive-token-url string Token server url
- --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --oos-compartment string Object storage compartment OCID
- --oos-config-file string Path to OCI config file (default "~/.oci/config")
- --oos-config-profile string Profile name inside the oci config file (default "Default")
- --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --oos-copy-timeout Duration Timeout for copy (default 1m0s)
- --oos-disable-checksum Don't store MD5 checksum with object metadata
- --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --oos-endpoint string Endpoint for Object storage API
- --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery
- --oos-namespace string Object storage namespace
- --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --oos-provider string Choose your Auth Provider (default "env_auth")
- --oos-region string Object storage Region
- --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
- --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
- --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the
- --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
- --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
- --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
- --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
- --opendrive-password string Password (obscured)
- --opendrive-username string Username
- --pcloud-auth-url string Auth server URL
- --pcloud-client-id string OAuth Client Id
- --pcloud-client-secret string OAuth Client Secret
- --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
- --pcloud-password string Your pcloud password (obscured)
- --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
- --pcloud-token string OAuth Access Token as a JSON blob
- --pcloud-token-url string Token server url
- --pcloud-username string Your pcloud username
- --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --qingstor-access-key-id string QingStor Access Key ID
- --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
- --qingstor-connection-retries int Number of connection retries (default 3)
- --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
- --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
- --qingstor-env-auth Get QingStor credentials from runtime
- --qingstor-secret-access-key string QingStor Secret Access Key (password)
- --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
- --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --qingstor-zone string Zone to connect to
- --s3-access-key-id string AWS Access Key ID
- --s3-acl string Canned ACL used when creating buckets and storing or copying objects
- --s3-bucket-acl string Canned ACL used when creating buckets
- --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --s3-decompress If set this will decompress gzip encoded objects
- --s3-disable-checksum Don't store MD5 checksum with object metadata
- --s3-disable-http2 Disable usage of http2 for S3 backends
- --s3-download-url string Custom endpoint for downloads
- --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --s3-endpoint string Endpoint for S3 API
- --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
- --s3-force-path-style If true use path style access; if false use virtual hosted style (default true)
- --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
- --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
- --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
- --s3-location-constraint string Location constraint - must be set to match the Region
- --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
- --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
- --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --s3-no-head If set, don't HEAD uploaded objects to check integrity
- --s3-no-head-object If set, do not do HEAD before GET when getting objects
- --s3-no-system-metadata Suppress setting and reading of system metadata
- --s3-profile string Profile to use in the shared credentials file
- --s3-provider string Choose your S3 provider
- --s3-region string Region to connect to
- --s3-requester-pays Enables requester pays option when interacting with S3 bucket
- --s3-secret-access-key string AWS Secret Access Key (password)
- --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
- --s3-session-token string An AWS session token
- --s3-shared-credentials-file string Path to the shared credentials file
- --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
- --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
- --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
- --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
- --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the key
- --s3-storage-class string The storage class to use when storing new objects in S3
- --s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
- --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
- --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
- --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
- --s3-v2-auth If true use v2 authentication
- --s3-version-at Time Show file versions as they were at the specified time (default off)
- --s3-versions Include old versions in directory listings
- --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
- --seafile-create-library Should rclone create a library if it doesn't exist
- --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
- --seafile-library string Name of the library
- --seafile-library-key string Library password (for encrypted libraries only) (obscured)
- --seafile-pass string Password (obscured)
- --seafile-url string URL of seafile host to connect to
- --seafile-user string User name (usually email address)
- --sftp-ask-password Allow asking for SFTP password when needed
- --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
- --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
- --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
- --sftp-disable-concurrent-reads If set don't use concurrent reads
- --sftp-disable-concurrent-writes If set don't use concurrent writes
- --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
- --sftp-host string SSH host to connect to
- --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
- --sftp-key-file string Path to PEM-encoded private key file
- --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
- --sftp-key-pem string Raw PEM-encoded private key
- --sftp-key-use-agent When set forces the usage of the ssh-agent
- --sftp-known-hosts-file string Optional path to known_hosts file
- --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
- --sftp-md5sum-command string The command used to read md5 hashes
- --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
- --sftp-path-override string Override path used by SSH shell commands
- --sftp-port int SSH port number (default 22)
- --sftp-pubkey-file string Optional path to public key file
- --sftp-server-command string Specifies the path or command to run a sftp server on the remote host
- --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
- --sftp-set-modtime Set the modified time on the remote if set (default true)
- --sftp-sha1sum-command string The command used to read sha1 hashes
- --sftp-shell-type string The type of SSH shell on remote server, if any
- --sftp-skip-links Set to skip any symlinks and any other non regular files
- --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
- --sftp-use-fstat If set use fstat instead of stat
- --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
- --sftp-user string SSH username (default "$USER")
- --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
- --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --sharefile-endpoint string Endpoint for API calls
- --sharefile-root-folder-id string ID of the root folder
- --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
- --sia-api-password string Sia Daemon API Password (obscured)
- --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
- --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
- --sia-user-agent string Siad User Agent (default "Sia-Agent")
- --skip-links Don't warn about skipped symlinks
- --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
- --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
- --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
- --smb-host string SMB server hostname to connect to
- --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --smb-pass string SMB password (obscured)
- --smb-port int SMB port number (default 445)
- --smb-spn string Service principal name
- --smb-user string SMB username (default "$USER")
- --storj-access-grant string Access grant
- --storj-api-key string API key
- --storj-passphrase string Encryption passphrase
- --storj-provider string Choose an authentication method (default "existing")
- --storj-satellite-address string Satellite address (default "us1.storj.io")
- --sugarsync-access-key-id string Sugarsync Access Key ID
- --sugarsync-app-id string Sugarsync App ID
- --sugarsync-authorization string Sugarsync authorization
- --sugarsync-authorization-expiry string Sugarsync authorization expiry
- --sugarsync-deleted-id string Sugarsync deleted folder id
- --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
- --sugarsync-hard-delete Permanently delete files if true
- --sugarsync-private-access-key string Sugarsync Private Access Key
- --sugarsync-refresh-token string Sugarsync refresh token
- --sugarsync-root-id string Sugarsync root id
- --sugarsync-user string Sugarsync user
- --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
- --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
- --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
- --swift-auth string Authentication URL for server (OS_AUTH_URL)
- --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
- --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
- --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
- --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
- --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
- --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
- --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
- --swift-key string API key or password (OS_PASSWORD)
- --swift-leave-parts-on-error If true avoid calling abort upload on a failure
- --swift-no-chunk Don't chunk files during streaming upload
- --swift-no-large-objects Disable support for static and dynamic large objects
- --swift-region string Region name - optional (OS_REGION_NAME)
- --swift-storage-policy string The storage policy to use when creating a new container
- --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
- --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
- --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
- --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
- --swift-user string User name to log in (OS_USERNAME)
- --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
- --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
- --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
- --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
- --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
- --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
- --union-upstreams string List of space separated upstreams
- --uptobox-access-token string Your access token
- --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
- --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
- --webdav-bearer-token-command string Command to run to get a bearer token
- --webdav-encoding string The encoding for the backend
- --webdav-headers CommaSepList Set HTTP headers for all transactions
- --webdav-pass string Password (obscured)
- --webdav-url string URL of http host to connect to
- --webdav-user string User name
- --webdav-vendor string Name of the WebDAV site/service/software you are using
- --yandex-auth-url string Auth server URL
- --yandex-client-id string OAuth Client Id
- --yandex-client-secret string OAuth Client Secret
- --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --yandex-hard-delete Delete files permanently rather than putting them into the trash
- --yandex-token string OAuth Access Token as a JSON blob
- --yandex-token-url string Token server url
- --zoho-auth-url string Auth server URL
- --zoho-client-id string OAuth Client Id
- --zoho-client-secret string OAuth Client Secret
- --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
- --zoho-region string Zoho region to connect to
- --zoho-token string OAuth Access Token as a JSON blob
- --zoho-token-url string Token server url
+ --acd-auth-url string Auth server URL
+ --acd-client-id string OAuth Client Id
+ --acd-client-secret string OAuth Client Secret
+ --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
+ --acd-token string OAuth Access Token as a JSON blob
+ --acd-token-url string Token server url
+ --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-remote string Remote or path to alias
+ --azureblob-access-tier string Access tier of blob: hot, cool or archive
+ --azureblob-account string Azure Storage Account Name
+ --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
+ --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
+ --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
+ --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
+ --azureblob-client-id string The ID of the client in use
+ --azureblob-client-secret string One of the service principal's client secrets
+ --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --azureblob-disable-checksum Don't store MD5 checksum with object metadata
+ --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+ --azureblob-endpoint string Endpoint for the service
+ --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
+ --azureblob-key string Storage Account Shared Key
+ --azureblob-list-chunk int Size of blob list (default 5000)
+ --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
+ --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-no-check-container If set, don't attempt to check the container exists or create it
+ --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
+ --azureblob-password string The user's password (obscured)
+ --azureblob-public-access string Public access level of a container: blob or container
+ --azureblob-sas-url string SAS URL for container level access only
+ --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
+ --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
+ --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
+ --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
+ --azureblob-use-emulator Uses local storage emulator if provided as 'true'
+ --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
+ --azureblob-username string User name (usually an email address)
+ --b2-account string Account ID or Application Key ID
+ --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
+ --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-disable-checksum Disable checksums for large (> upload cutoff) files
+ --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-url string Custom endpoint for downloads
+ --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --b2-endpoint string Endpoint for the service
+ --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
+ --b2-key string Application Key
+ --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
+ --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --b2-version-at Time Show file versions as they were at the specified time (default off)
+ --b2-versions Include old versions in directory listings
+ --box-access-token string Box App Primary Access Token
+ --box-auth-url string Auth server URL
+ --box-box-config-file string Box App config.json location
+ --box-box-sub-type string (default "user")
+ --box-client-id string OAuth Client Id
+ --box-client-secret string OAuth Client Secret
+ --box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+ --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
+ --box-owned-by string Only show items owned by the login (email address) passed in
+ --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
+ --box-token string OAuth Access Token as a JSON blob
+ --box-token-url string Token server url
+ --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+ --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
+ --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
+ --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
+ --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
+ --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
+ --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
+ --cache-db-purge Clear all the cached data for this remote on start
+ --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
+ --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
+ --cache-plex-password string The password of the Plex user (obscured)
+ --cache-plex-url string The URL of the Plex server
+ --cache-plex-username string The username of the Plex user
+ --cache-read-retries int How many times to retry a read from a cache storage (default 10)
+ --cache-remote string Remote to cache
+ --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
+ --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
+ --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
+ --cache-workers int How many workers should run in parallel to download chunks (default 4)
+ --cache-writes Cache file data on writes through the FS
+ --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
+ --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
+ --chunker-remote string Remote to chunk/unchunk
+ --combine-upstreams SpaceSepList Upstreams for combining
+ --compress-level int GZIP compression level (-2 to 9) (default -1)
+ --compress-mode string Compression mode (default "gzip")
+ --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
+ --compress-remote string Remote to compress
+ -L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
+ --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
+ --crypt-filename-encryption string How to encrypt the filenames (default "standard")
+ --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
+ --crypt-pass-bad-blocks If set this will pass bad blocks through as all 0
+ --crypt-password string Password or pass phrase for encryption (obscured)
+ --crypt-password2 string Password or pass phrase for salt (obscured)
+ --crypt-remote string Remote to encrypt/decrypt
+ --crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
+ --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+ --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
+ --drive-auth-owner-only Only consider files owned by the authenticated user
+ --drive-auth-url string Auth server URL
+ --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
+ --drive-client-id string Google Application Client Id
+ --drive-client-secret string OAuth Client Secret
+ --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-disable-http2 Disable drive using http2 (default true)
+ --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
+ --drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
+ --drive-formats string Deprecated: See export_formats
+ --drive-impersonate string Impersonate this user when using a service account
+ --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
+ --drive-keep-revision-forever Keep new head revision of each file forever
+ --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
+ --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
+ --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
+ --drive-resource-key string Resource key for accessing a link-shared file
+ --drive-root-folder-id string ID of the root folder
+ --drive-scope string Scope that rclone should use when requesting access from drive
+ --drive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --drive-service-account-credentials string Service Account Credentials JSON blob
+ --drive-service-account-file string Service Account Credentials JSON file path
+ --drive-shared-with-me Only show files that are shared with me
+ --drive-size-as-quota Show sizes as storage quota usage, not actual size
+ --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
+ --drive-skip-dangling-shortcuts If set skip dangling shortcut files
+ --drive-skip-gdocs Skip google documents in all listings
+ --drive-skip-shortcuts If set skip shortcut files
+ --drive-starred-only Only show files that are starred
+ --drive-stop-on-download-limit Make download limit errors be fatal
+ --drive-stop-on-upload-limit Make upload limit errors be fatal
+ --drive-team-drive string ID of the Shared Drive (Team Drive)
+ --drive-token string OAuth Access Token as a JSON blob
+ --drive-token-url string Token server url
+ --drive-trashed-only Only show files that are in the trash
+ --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
+ --drive-use-created-date Use file created date instead of modified date
+ --drive-use-shared-date Use date file was shared instead of modified date
+ --drive-use-trash Send files to the trash instead of deleting permanently (default true)
+ --drive-v2-download-min-size SizeSuffix If objects are greater, use drive v2 API to download (default off)
+ --dropbox-auth-url string Auth server URL
+ --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
+ --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
+ --dropbox-batch-size int Max number of files in upload batch
+ --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
+ --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
+ --dropbox-client-id string OAuth Client Id
+ --dropbox-client-secret string OAuth Client Secret
+ --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+ --dropbox-impersonate string Impersonate this user when using a business account
+ --dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --dropbox-shared-files Instructs rclone to work on individual shared files
+ --dropbox-shared-folders Instructs rclone to work on shared folders
+ --dropbox-token string OAuth Access Token as a JSON blob
+ --dropbox-token-url string Token server url
+ --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
+ --fichier-cdn Set if you wish to use CDN download links
+ --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+ --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
+ --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
+ --fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --filefabric-permanent-token string Permanent Authentication Token
+ --filefabric-root-folder-id string ID of the root folder
+ --filefabric-token string Session Token
+ --filefabric-token-expiry string Token expiry time
+ --filefabric-url string URL of the Enterprise File Fabric to connect to
+ --filefabric-version string Version read from the file fabric
+ --ftp-ask-password Allow asking for FTP password when needed
+ --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
+ --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-disable-epsv Disable using EPSV even if server advertises support
+ --ftp-disable-mlsd Disable using MLSD even if server advertises support
+ --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
+ --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
+ --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
+ --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
+ --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
+ --ftp-host string FTP host to connect to
+ --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --ftp-no-check-certificate Do not verify the TLS certificate of the server
+ --ftp-pass string FTP password (obscured)
+ --ftp-port int FTP port number (default 21)
+ --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
+ --ftp-tls Use Implicit FTPS (FTP over TLS)
+ --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
+ --ftp-user string FTP username (default "$USER")
+ --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
+ --gcs-anonymous Access public buckets and objects without credentials
+ --gcs-auth-url string Auth server URL
+ --gcs-bucket-acl string Access Control List for new buckets
+ --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
+ --gcs-client-id string OAuth Client Id
+ --gcs-client-secret string OAuth Client Secret
+ --gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gcs-endpoint string Endpoint for the service
+ --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --gcs-location string Location for the newly created buckets
+ --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --gcs-object-acl string Access Control List for new objects
+ --gcs-project-number string Project number
+ --gcs-service-account-file string Service Account Credentials JSON file path
+ --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
+ --gcs-token string OAuth Access Token as a JSON blob
+ --gcs-token-url string Token server url
+ --gcs-user-project string User project
+ --gphotos-auth-url string Auth server URL
+ --gphotos-client-id string OAuth Client Id
+ --gphotos-client-secret string OAuth Client Secret
+ --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gphotos-include-archived Also view and download archived media
+ --gphotos-read-only Set to make the Google Photos backend read only
+ --gphotos-read-size Set to read the size of media items
+ --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
+ --gphotos-token string OAuth Access Token as a JSON blob
+ --gphotos-token-url string Token server url
+ --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
+ --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+ --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
+ --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+ --hdfs-namenode string Hadoop name node and port
+ --hdfs-service-principal-name string Kerberos service principal name for the namenode
+ --hdfs-username string Hadoop user name
+ --hidrive-auth-url string Auth server URL
+ --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
+ --hidrive-client-id string OAuth Client Id
+ --hidrive-client-secret string OAuth Client Secret
+ --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
+ --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
+ --hidrive-root-prefix string The root/parent folder for all paths (default "/")
+ --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
+ --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
+ --hidrive-token string OAuth Access Token as a JSON blob
+ --hidrive-token-url string Token server url
+ --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
+ --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-headers CommaSepList Set HTTP headers for all transactions
+ --http-no-head Don't use HEAD requests
+ --http-no-slash Set this if the site doesn't end directories with /
+ --http-url string URL of HTTP host to connect to
+ --internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-disable-checksum Don't ask the server to test against the MD5 checksum calculated by rclone (default true)
+ --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
+ --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
+ --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
+ --internetarchive-secret-access-key string IAS3 Secret Key (password)
+ --internetarchive-wait-archive Duration Timeout for waiting for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
+ --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+ --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
+ --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+ --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
+ --jottacloud-trashed-only Only show files that are in the trash
+ --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --koofr-endpoint string The Koofr API endpoint to use
+ --koofr-mountid string Mount ID of the mount to use
+ --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
+ --koofr-provider string Choose your storage provider
+ --koofr-setmtime Does the backend support setting modification time (default true)
+ --koofr-user string Your user name
+ -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
+ --local-case-insensitive Force the filesystem to report itself as case insensitive
+ --local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --local-no-check-updated Don't check to see if the files change during upload
+ --local-no-preallocate Disable preallocation of disk space for transferred files
+ --local-no-set-modtime Disable setting modtime
+ --local-no-sparse Disable sparse files for multi-thread downloads
+ --local-nounc Disable UNC (long path names) conversion on Windows
+ --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
+ --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
+ --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
+ --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --mailru-pass string Password (obscured)
+ --mailru-speedup-enable Skip full upload if there is another file with the same data hash (default true)
+ --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
+ --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
+ --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
+ --mailru-user string User name (usually email)
+ --mega-debug Output more debug from Mega
+ --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --mega-hard-delete Delete files permanently rather than putting them into the trash
+ --mega-pass string Password (obscured)
+ --mega-use-https Use HTTPS for transfers
+ --mega-user string User name
+ --netstorage-account string Set the NetStorage account name
+ --netstorage-host string Domain+path of NetStorage host to connect to
+ --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
+ --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
+ -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
+ --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
+ --onedrive-auth-url string Auth server URL
+ --onedrive-av-override Allows download of files the server thinks have a virus
+ --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
+ --onedrive-client-id string OAuth Client Id
+ --onedrive-client-secret string OAuth Client Secret
+ --onedrive-drive-id string The ID of the drive to use
+ --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
+ --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
+ --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
+ --onedrive-link-password string Set the password for links created by the link command
+ --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
+ --onedrive-link-type string Set the type of the links created by the link command (default "view")
+ --onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-no-versions Remove all versions on modifying operations
+ --onedrive-region string Choose national cloud region for OneDrive (default "global")
+ --onedrive-root-folder-id string ID of the root folder
+ --onedrive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --onedrive-token string OAuth Access Token as a JSON blob
+ --onedrive-token-url string Token server url
+ --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --oos-compartment string Object storage compartment OCID
+ --oos-config-file string Path to OCI config file (default "~/.oci/config")
+ --oos-config-profile string Profile name inside the oci config file (default "Default")
+ --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-disable-checksum Don't store MD5 checksum with object metadata
+ --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --oos-endpoint string Endpoint for Object storage API
+ --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --oos-namespace string Object storage namespace
+ --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --oos-provider string Choose your Auth Provider (default "env_auth")
+ --oos-region string Object storage Region
+ --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
+ --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to encrypt or decrypt the data
+ --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated with the object
+ --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption key
+ --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the OCID of a master encryption key
+ --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
+ --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
+ --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+ --opendrive-password string Password (obscured)
+ --opendrive-username string Username
+ --pcloud-auth-url string Auth server URL
+ --pcloud-client-id string OAuth Client Id
+ --pcloud-client-secret string OAuth Client Secret
+ --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
+ --pcloud-password string Your pcloud password (obscured)
+ --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
+ --pcloud-token string OAuth Access Token as a JSON blob
+ --pcloud-token-url string Token server url
+ --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
+ --pikpak-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the hash if required (default 10Mi)
+ --pikpak-pass string Pikpak password (obscured)
+ --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
+ --pikpak-trashed-only Only show files that are in the trash
+ --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
+ --pikpak-user string Pikpak username
+ --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --qingstor-access-key-id string QingStor Access Key ID
+ --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
+ --qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
+ --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
+ --qingstor-env-auth Get QingStor credentials from runtime
+ --qingstor-secret-access-key string QingStor Secret Access Key (password)
+ --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
+ --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --qingstor-zone string Zone to connect to
+ --s3-access-key-id string AWS Access Key ID
+ --s3-acl string Canned ACL used when creating buckets and storing or copying objects
+ --s3-bucket-acl string Canned ACL used when creating buckets
+ --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --s3-decompress If set this will decompress gzip encoded objects
+ --s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --s3-disable-checksum Don't store MD5 checksum with object metadata
+ --s3-disable-http2 Disable usage of http2 for S3 backends
+ --s3-download-url string Custom endpoint for downloads
+ --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --s3-endpoint string Endpoint for S3 API
+ --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
+ --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
+ --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+ --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
+ --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
+ --s3-location-constraint string Location constraint - must be set to match the Region
+ --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
+ --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
+ --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --s3-no-head If set, don't HEAD uploaded objects to check integrity
+ --s3-no-head-object If set, do not do HEAD before GET when getting objects
+ --s3-no-system-metadata Suppress setting and reading of system metadata
+ --s3-profile string Profile to use in the shared credentials file
+ --s3-provider string Choose your S3 provider
+ --s3-region string Region to connect to
+ --s3-requester-pays Enables requester pays option when interacting with S3 bucket
+ --s3-secret-access-key string AWS Secret Access Key (password)
+ --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
+ --s3-session-token string An AWS session token
+ --s3-shared-credentials-file string Path to the shared credentials file
+ --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+ --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
+ --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
+ --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+ --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
+ --s3-storage-class string The storage class to use when storing new objects in S3
+ --s3-sts-endpoint string Endpoint for STS
+ --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
+ --s3-use-accept-encoding-gzip Tristate Whether to send Accept-Encoding: gzip header (default unset)
+ --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
+ --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
+ --s3-v2-auth If true use v2 authentication
+ --s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-versions Include old versions in directory listings
+ --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
+ --seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+ --seafile-library string Name of the library
+ --seafile-library-key string Library password (for encrypted libraries only) (obscured)
+ --seafile-pass string Password (obscured)
+ --seafile-url string URL of seafile host to connect to
+ --seafile-user string User name (usually email address)
+ --sftp-ask-password Allow asking for SFTP password when needed
+ --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
+ --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
+ --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
+ --sftp-disable-concurrent-reads If set don't use concurrent reads
+ --sftp-disable-concurrent-writes If set don't use concurrent writes
+ --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
+ --sftp-host string SSH host to connect to
+ --sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
+ --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
+ --sftp-key-file string Path to PEM-encoded private key file
+ --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
+ --sftp-key-pem string Raw PEM-encoded private key
+ --sftp-key-use-agent When set forces the usage of the ssh-agent
+ --sftp-known-hosts-file string Optional path to known_hosts file
+ --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
+ --sftp-md5sum-command string The command used to read md5 hashes
+ --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
+ --sftp-path-override string Override path used by SSH shell commands
+ --sftp-port int SSH port number (default 22)
+ --sftp-pubkey-file string Optional path to public key file
+ --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
+ --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
+ --sftp-set-modtime Set the modified time on the remote if set (default true)
+ --sftp-sha1sum-command string The command used to read sha1 hashes
+ --sftp-shell-type string The type of SSH shell on remote server, if any
+ --sftp-skip-links Set to skip any symlinks and any other non regular files
+ --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
+ --sftp-use-fstat If set use fstat instead of stat
+ --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
+ --sftp-user string SSH username (default "$USER")
+ --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
+ --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --sharefile-endpoint string Endpoint for API calls
+ --sharefile-root-folder-id string ID of the root folder
+ --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
+ --sia-api-password string Sia Daemon API Password (obscured)
+ --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
+ --sia-user-agent string Siad User Agent (default "Sia-Agent")
+ --skip-links Don't warn about skipped symlinks
+ --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
+ --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
+ --smb-host string SMB server hostname to connect to
+ --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --smb-pass string SMB password (obscured)
+ --smb-port int SMB port number (default 445)
+ --smb-spn string Service principal name
+ --smb-user string SMB username (default "$USER")
+ --storj-access-grant string Access grant
+ --storj-api-key string API key
+ --storj-passphrase string Encryption passphrase
+ --storj-provider string Choose an authentication method (default "existing")
+ --storj-satellite-address string Satellite address (default "us1.storj.io")
+ --sugarsync-access-key-id string Sugarsync Access Key ID
+ --sugarsync-app-id string Sugarsync App ID
+ --sugarsync-authorization string Sugarsync authorization
+ --sugarsync-authorization-expiry string Sugarsync authorization expiry
+ --sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
+ --sugarsync-hard-delete Permanently delete files if true
+ --sugarsync-private-access-key string Sugarsync Private Access Key
+ --sugarsync-refresh-token string Sugarsync refresh token
+ --sugarsync-root-id string Sugarsync root id
+ --sugarsync-user string Sugarsync user
+ --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
+ --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
+ --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
+ --swift-auth string Authentication URL for server (OS_AUTH_URL)
+ --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
+ --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
+ --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
+ --swift-key string API key or password (OS_PASSWORD)
+ --swift-leave-parts-on-error If true avoid calling abort upload on a failure
+ --swift-no-chunk Don't chunk files during streaming upload
+ --swift-no-large-objects Disable support for static and dynamic large objects
+ --swift-region string Region name - optional (OS_REGION_NAME)
+ --swift-storage-policy string The storage policy to use when creating a new container
+ --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
+ --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ --swift-user string User name to log in (OS_USERNAME)
+ --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+ --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
+ --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
+ --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
+ --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
+ --union-upstreams string List of space separated upstreams
+ --uptobox-access-token string Your access token
+ --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+ --uptobox-private Set to make uploaded files private
+ --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
+ --webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-encoding string The encoding for the backend
+ --webdav-headers CommaSepList Set HTTP headers for all transactions
+ --webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --webdav-pass string Password (obscured)
+ --webdav-url string URL of http host to connect to
+ --webdav-user string User name
+ --webdav-vendor string Name of the WebDAV site/service/software you are using
+ --yandex-auth-url string Auth server URL
+ --yandex-client-id string OAuth Client Id
+ --yandex-client-secret string OAuth Client Secret
+ --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-token string OAuth Access Token as a JSON blob
+ --yandex-token-url string Token server url
+ --zoho-auth-url string Auth server URL
+ --zoho-client-id string OAuth Client Id
+ --zoho-client-secret string OAuth Client Secret
+ --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
+ --zoho-region string Zoho region to connect to
+ --zoho-token string OAuth Access Token as a JSON blob
+ --zoho-token-url string Token server url
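+
+Any of the backend flags above can also be set via the corresponding
+environment variable, formed by upper-casing the flag name, replacing
+dashes with underscores and adding an RCLONE_ prefix. As an
+illustrative sketch (the remote name mydrive: is a placeholder):
+
+    # set the Google Drive upload chunk size for this run only
+    rclone copy /local/path mydrive:backup --drive-chunk-size 64Mi
+
+    # the same setting supplied through the environment
+    RCLONE_DRIVE_CHUNK_SIZE=64Mi rclone copy /local/path mydrive:backup
+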
Docker Volume Plugin
@@ -16163,7 +16445,7 @@ Example filters file for Dropbox
# NOTICE: If you make changes to this file you MUST do a --resync run.
# Run with --dry-run to see what changes will be made.
- # Dropbox wont sync some files so filter them away here.
+ # Dropbox won't sync some files so filter them away here.
# See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
- .dropbox.attr
- ~*.tmp
@@ -16555,7 +16837,7 @@ Supported test commands
and re-creating the parent would change its ID.
- delete-file Delete a single file.
- delete-glob Delete a group of files located one
- level deep in the given directory with names maching a given glob
+ level deep in the given directory with names matching a given glob
pattern.
- touch-glob YYYY-MM-DD Change modification time on a
group of files.
@@ -16824,6 +17106,17 @@ Properties:
- Type: string
- Required: false
+--fichier-cdn
+
+Set if you wish to use CDN download links.
+
+Properties:
+
+- Config: cdn
+- Env Var: RCLONE_FICHIER_CDN
+- Type: bool
+- Default: false
+
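+For example, to download over CDN links (remote name and paths are
+illustrative):
+
+    rclone copy fichier:path/to/dir /local/dir --fichier-cdn
+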
--fichier-encoding
The encoding for the backend.
@@ -17293,12 +17586,14 @@ The S3 backend can be used with a number of different providers:
- Arvan Cloud Object Storage (AOS)
- DigitalOcean Spaces
- Dreamhost
+- GCS
- Huawei OBS
- IBM COS S3
- IDrive e2
- IONOS Cloud
- Liara Object Storage
- Minio
+- Petabox
- Qiniu Cloud Object Storage (Kodo)
- RackCorp Object Storage
- Scaleway
@@ -17652,7 +17947,12 @@ However for objects which were uploaded as multipart uploads or with
server side encryption (SSE-AWS or SSE-C) the ETag header is no longer
the MD5 sum of the data, so rclone adds an additional piece of metadata
X-Amz-Meta-Md5chksum which is a base64 encoded MD5 hash (in the same
-format as is required for Content-MD5).
+format as is required for Content-MD5). You can use base64 -d and
+hexdump to check this value manually:
+
+ echo 'VWTGdNx3LyXQDfA0e2Edxw==' | base64 -d | hexdump
+
+or you can use rclone check to verify the hashes are OK.
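+
+For example (local path and remote name are placeholders):
+
+    rclone check /path/to/local remote:bucket/path
+
+Assuming openssl is available, the base64 encoded MD5 of a local file
+can be produced for comparison like this:
+
+    openssl md5 -binary file.bin | base64
+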
For large objects, calculating this hash can take some time so the
addition of this hash can be disabled with --s3-disable-checksum. This
@@ -17913,10 +18213,11 @@ be uploaded as multipart.
Standard options
Here are the Standard options specific to s3 (Amazon S3 Compliant
-Storage Providers including AWS, Alibaba, Ceph, China Mobile,
-Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
-IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp,
-Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China
+Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
+IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox,
+RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and
+Wasabi).
--s3-provider
@@ -17933,18 +18234,20 @@ Properties:
- Amazon Web Services (AWS) S3
- "Alibaba"
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
+ - "ArvanCloud"
+ - Arvan Cloud Object Storage (AOS)
- "Ceph"
- Ceph Object Storage
- "ChinaMobile"
- China Mobile Ecloud Elastic Object Storage (EOS)
- "Cloudflare"
- Cloudflare R2 Storage
- - "ArvanCloud"
- - Arvan Cloud Object Storage (AOS)
- "DigitalOcean"
- DigitalOcean Spaces
- "Dreamhost"
- Dreamhost DreamObjects
+ - "GCS"
+ - Google Cloud Storage
- "HuaweiOBS"
- Huawei Object Storage Service
- "IBMCOS"
@@ -17961,6 +18264,8 @@ Properties:
- Minio Object Storage
- "Netease"
- Netease Object Storage (NOS)
+ - "Petabox"
+ - Petabox Object Storage
- "RackCorp"
- RackCorp Object Storage
- "Scaleway"
@@ -18301,6 +18606,29 @@ Properties:
--s3-region
+Region where your bucket will be created and your data stored.
+
+Properties:
+
+- Config: region
+- Env Var: RCLONE_S3_REGION
+- Provider: Petabox
+- Type: string
+- Required: false
+- Examples:
+ - "us-east-1"
+ - US East (N. Virginia)
+ - "eu-central-1"
+ - Europe (Frankfurt)
+ - "ap-southeast-1"
+ - Asia Pacific (Singapore)
+ - "me-south-1"
+ - Middle East (Bahrain)
+ - "sa-east-1"
+ - South America (São Paulo)
+
+--s3-region
+
Region to connect to.
Leave blank if you are using an S3 clone and you don't have a region.
@@ -18310,7 +18638,7 @@ Properties:
- Config: region
- Env Var: RCLONE_S3_REGION
- Provider:
- !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
+ !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
- Type: string
- Required: false
- Examples:
@@ -18421,10 +18749,10 @@ Properties:
- Type: string
- Required: false
- Examples:
- - "s3.ir-thr-at1.arvanstorage.com"
+ - "s3.ir-thr-at1.arvanstorage.ir"
- The default endpoint - a good choice if you are unsure.
- - Tehran Iran (Asiatech)
- - "s3.ir-tbz-sh1.arvanstorage.com"
+ - Tehran Iran (Simin)
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
- Tabriz Iran (Shahriar)
--s3-endpoint
@@ -18589,6 +18917,33 @@ Properties:
--s3-endpoint
+Endpoint for Petabox S3 Object Storage.
+
+Specify the endpoint from the same region.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: Petabox
+- Type: string
+- Required: true
+- Examples:
+ - "s3.petabox.io"
+ - US East (N. Virginia)
+ - "s3.us-east-1.petabox.io"
+ - US East (N. Virginia)
+ - "s3.eu-central-1.petabox.io"
+ - Europe (Frankfurt)
+ - "s3.ap-southeast-1.petabox.io"
+ - Asia Pacific (Singapore)
+ - "s3.me-south-1.petabox.io"
+ - Middle East (Bahrain)
+ - "s3.sa-east-1.petabox.io"
+ - South America (São Paulo)
+
+--s3-endpoint
+
Endpoint for Liara Object Storage API.
Properties:
@@ -18749,6 +19104,21 @@ Properties:
--s3-endpoint
+Endpoint for Google Cloud Storage.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: GCS
+- Type: string
+- Required: false
+- Examples:
+ - "https://storage.googleapis.com"
+ - Google Cloud Storage endpoint
+
+--s3-endpoint
+
Endpoint for Storj Gateway.
Properties:
@@ -18902,7 +19272,7 @@ Properties:
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
- Provider:
- !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
+ !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
- Type: string
- Required: false
- Examples:
@@ -18956,8 +19326,10 @@ Properties:
- Wasabi AP Southeast 2 (Sydney)
- "storage.iran.liara.space"
- Liara Iran endpoint
- - "s3.ir-thr-at1.arvanstorage.com"
- - ArvanCloud Tehran Iran (Asiatech) endpoint
+ - "s3.ir-thr-at1.arvanstorage.ir"
+ - ArvanCloud Tehran Iran (Simin) endpoint
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
+ - ArvanCloud Tabriz Iran (Shahriar) endpoint
--s3-location-constraint
@@ -19114,7 +19486,7 @@ Properties:
- Required: false
- Examples:
- "ir-thr-at1"
- - Tehran Iran (Asiatech)
+ - Tehran Iran (Simin)
- "ir-tbz-sh1"
- Tabriz Iran (Shahriar)
@@ -19289,7 +19661,7 @@ Properties:
- Config: location_constraint
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
- Provider:
- !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
+ !AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox
- Type: string
- Required: false
@@ -19543,10 +19915,17 @@ Properties:
- "STANDARD"
- The Standard class for any upload.
- Suitable for on-demand content like streaming or CDN.
+ - Available in all regions.
- "GLACIER"
- Archived storage.
- Prices are lower, but it needs to be restored first to be
accessed.
+ - Available in FR-PAR and NL-AMS regions.
+ - "ONEZONE_IA"
+ - One Zone - Infrequent Access.
+ - A good choice for storing secondary backup copies or easily
+ re-creatable data.
+ - Available in the FR-PAR region only.
--s3-storage-class
@@ -19572,10 +19951,11 @@ Properties:
Advanced options
Here are the Advanced options specific to s3 (Amazon S3 Compliant
-Storage Providers including AWS, Alibaba, Ceph, China Mobile,
-Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
-IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp,
-Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China
+Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
+IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox,
+RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and
+Wasabi).
--s3-bucket-acl
@@ -20126,6 +20506,21 @@ Properties:
- Type: string
- Required: false
+--s3-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is
+created.
+
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with "/" to persist the folder.
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_S3_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
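+For example, creating an empty directory together with its marker
+object might look like this (remote and bucket names are illustrative):
+
+    rclone mkdir --s3-directory-markers s3remote:bucket/empty-dir
+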
--s3-use-multipart-etag
Whether to use ETag in multipart uploads for verification
@@ -20238,6 +20633,29 @@ Properties:
- Type: Tristate
- Default: unset
+--s3-use-accept-encoding-gzip
+
+Whether to send Accept-Encoding: gzip header.
+
+By default, rclone will append Accept-Encoding: gzip to the request to
+download compressed objects whenever possible.
+
+However some providers such as Google Cloud Storage may alter the HTTP
+headers, breaking the signature of the request.
+
+A symptom of this would be receiving errors like
+
+ SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
+
+In this case, you might want to try disabling this option.
+
+Properties:
+
+- Config: use_accept_encoding_gzip
+- Env Var: RCLONE_S3_USE_ACCEPT_ENCODING_GZIP
+- Type: Tristate
+- Default: unset
+
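+For example, a sketch of disabling it for a GCS-backed S3 remote (the
+remote name "gs-s3" is illustrative):
+
+    rclone copy --s3-use-accept-encoding-gzip=false gs-s3:bucket/path /tmp/dest
+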
--s3-no-system-metadata
Suppress setting and reading of system metadata
@@ -20672,6 +21090,21 @@ in your config:
server_side_encryption =
storage_class =
+Google Cloud Storage
+
+Google Cloud Storage is an S3-interoperable object storage service from
+Google Cloud Platform.
+
+To connect to Google Cloud Storage you will need an access key and
+secret key. These can be retrieved by creating an HMAC key.
+
+ [gs]
+ type = s3
+ provider = GCS
+ access_key_id = your_access_key
+ secret_access_key = your_secret_key
+ endpoint = https://storage.googleapis.com
+
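+Once configured, the remote can be used like any other S3 remote, for
+example (bucket name is illustrative):
+
+    rclone lsd gs:
+    rclone copy /local/path gs:my-bucket/backup
+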
DigitalOcean Spaces
Spaces is an S3-interoperable object storage service from cloud provider
@@ -22561,6 +22994,159 @@ For Netease NOS configure as per the configurator rclone config setting
the provider Netease. This will automatically set
force_path_style = false which is necessary for it to run properly.
+Petabox
+
+Here is an example of making a Petabox configuration. First run:
+
+ rclone config
+
+This will guide you through an interactive setup process.
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ n/s> n
+
+ Enter name for new remote.
+ name> My Petabox Storage
+
+ Option Storage.
+ Type of storage to configure.
+ Choose a number from below, or type in your own value.
+ [snip]
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+ [snip]
+ Storage> s3
+
+ Option provider.
+ Choose your S3 provider.
+ Choose a number from below, or type in your own value.
+ Press Enter to leave empty.
+ [snip]
+ XX / Petabox Object Storage
+ \ (Petabox)
+ [snip]
+ provider> Petabox
+
+ Option env_auth.
+ Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+ Only applies if access_key_id and secret_access_key is blank.
+ Choose a number from below, or type in your own boolean value (true or false).
+ Press Enter for the default (false).
+ 1 / Enter AWS credentials in the next step.
+ \ (false)
+ 2 / Get AWS credentials from the environment (env vars or IAM).
+ \ (true)
+ env_auth> 1
+
+ Option access_key_id.
+ AWS Access Key ID.
+ Leave blank for anonymous access or runtime credentials.
+ Enter a value. Press Enter to leave empty.
+ access_key_id> YOUR_ACCESS_KEY_ID
+
+ Option secret_access_key.
+ AWS Secret Access Key (password).
+ Leave blank for anonymous access or runtime credentials.
+ Enter a value. Press Enter to leave empty.
+ secret_access_key> YOUR_SECRET_ACCESS_KEY
+
+ Option region.
+ Region where your bucket will be created and your data stored.
+ Choose a number from below, or type in your own value.
+ Press Enter to leave empty.
+ 1 / US East (N. Virginia)
+ \ (us-east-1)
+ 2 / Europe (Frankfurt)
+ \ (eu-central-1)
+ 3 / Asia Pacific (Singapore)
+ \ (ap-southeast-1)
+ 4 / Middle East (Bahrain)
+ \ (me-south-1)
+ 5 / South America (São Paulo)
+ \ (sa-east-1)
+ region> 1
+
+ Option endpoint.
+ Endpoint for Petabox S3 Object Storage.
+ Specify the endpoint from the same region.
+ Choose a number from below, or type in your own value.
+ 1 / US East (N. Virginia)
+ \ (s3.petabox.io)
+ 2 / US East (N. Virginia)
+ \ (s3.us-east-1.petabox.io)
+ 3 / Europe (Frankfurt)
+ \ (s3.eu-central-1.petabox.io)
+ 4 / Asia Pacific (Singapore)
+ \ (s3.ap-southeast-1.petabox.io)
+ 5 / Middle East (Bahrain)
+ \ (s3.me-south-1.petabox.io)
+ 6 / South America (São Paulo)
+ \ (s3.sa-east-1.petabox.io)
+ endpoint> 1
+
+ Option acl.
+ Canned ACL used when creating buckets and storing or copying objects.
+ This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
+ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+ Note that this ACL is applied when server-side copying objects as S3
+ doesn't copy the ACL from the source but rather writes a fresh one.
+ If the acl is an empty string then no X-Amz-Acl: header is added and
+ the default (private) will be used.
+ Choose a number from below, or type in your own value.
+ Press Enter to leave empty.
+ / Owner gets FULL_CONTROL.
+ 1 | No one else has access rights (default).
+ \ (private)
+ / Owner gets FULL_CONTROL.
+ 2 | The AllUsers group gets READ access.
+ \ (public-read)
+ / Owner gets FULL_CONTROL.
+ 3 | The AllUsers group gets READ and WRITE access.
+ | Granting this on a bucket is generally not recommended.
+ \ (public-read-write)
+ / Owner gets FULL_CONTROL.
+ 4 | The AuthenticatedUsers group gets READ access.
+ \ (authenticated-read)
+ / Object owner gets FULL_CONTROL.
+ 5 | Bucket owner gets READ access.
+ | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-read)
+ / Both the object owner and the bucket owner get FULL_CONTROL over the object.
+ 6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \ (bucket-owner-full-control)
+ acl> 1
+
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n> No
+
+ Configuration complete.
+ Options:
+ - type: s3
+ - provider: Petabox
+ - access_key_id: YOUR_ACCESS_KEY_ID
+ - secret_access_key: YOUR_SECRET_ACCESS_KEY
+ - region: us-east-1
+ - endpoint: s3.petabox.io
+ Keep this "My Petabox Storage" remote?
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+This will leave the config file looking like this.
+
+ [My Petabox Storage]
+ type = s3
+ provider = Petabox
+ access_key_id = YOUR_ACCESS_KEY_ID
+ secret_access_key = YOUR_SECRET_ACCESS_KEY
+ region = us-east-1
+ endpoint = s3.petabox.io
+
Storj
Storj is a decentralized cloud storage which can be used through its
@@ -24576,7 +25162,7 @@ support it, chunker will then add metadata to all files, even small.
However, this can double the amount of small files in storage and incur
additional service charges. You can even use chunker to force md5/sha1
support in any other remote at expense of sidecar meta objects by
-setting e.g. chunk_type=sha1all to force hashsums and chunk_size=1P to
+setting e.g. hash_type=sha1all to force hashsums and chunk_size=1P to
effectively disable chunking.
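+
+A minimal sketch of such a configuration (remote and bucket names are
+illustrative):
+
+    [hashsums]
+    type = chunker
+    remote = otherremote:bucket
+    hash_type = sha1all
+    chunk_size = 1P
+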
Normally, when a file is copied to chunker controlled remote, chunker
@@ -25442,7 +26028,7 @@ An experimental advanced option filename_encoding is now provided to
address this problem to a certain degree. For cloud storage systems with
case sensitive file names (e.g. Google Drive), base64 can be used to
reduce file name length. For cloud storage systems using UTF-16 to store
-file names internally (e.g. OneDrive), base32768 can be used to
+file names internally (e.g. OneDrive, Dropbox), base32768 can be used to
drastically reduce file name length.
An alternative, future rclone file name encryption mode may tolerate
@@ -25472,7 +26058,7 @@ depends on that.
Hashes are not stored for crypt. However the data integrity is protected
by an extremely strong crypto authenticator.
-Use the rclone cryptcheck command to check the integrity of a crypted
+Use the rclone cryptcheck command to check the integrity of an encrypted
remote instead of rclone check which can't check the checksums properly.
Standard options
@@ -25512,7 +26098,7 @@ Properties:
- Very simple filename obfuscation.
- "off"
- Don't encrypt the file names.
- - Adds a ".bin" extension only.
+        - Adds a ".bin" (or custom "suffix") extension only.
--crypt-directory-name-encryption
@@ -25567,6 +26153,8 @@ remote).
--crypt-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different crypt
configs.
@@ -25620,6 +26208,21 @@ Properties:
- "false"
- Encrypt file data.
+--crypt-pass-bad-blocks
+
+If set this will pass bad blocks through as all 0.
+
+This should not be set in normal operation, it should only be set if
+trying to recover an encrypted file with errors and it is desired to
+recover as much of the file as possible.
+
+Properties:
+
+- Config: pass_bad_blocks
+- Env Var: RCLONE_CRYPT_PASS_BAD_BLOCKS
+- Type: bool
+- Default: false
+
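+For example, a sketch of recovering as much as possible of a damaged
+file (remote and paths are illustrative):
+
+    rclone copy --crypt-pass-bad-blocks secret:damaged-file /tmp/recovered
+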
--crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -25643,7 +26246,21 @@ Properties:
- Encode using base32768. Suitable if your remote counts
UTF-16 or
- Unicode codepoint instead of UTF-8 byte length. (Eg.
- Onedrive)
+ Onedrive, Dropbox)
+
+--crypt-suffix
+
+If this is set it will override the default suffix of ".bin".
+
+Setting suffix to "none" will result in an empty suffix. This may be
+useful when the path length is critical.
+
+Properties:
+
+- Config: suffix
+- Env Var: RCLONE_CRYPT_SUFFIX
+- Type: string
+- Default: ".bin"
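+
+For example, a sketch of removing the suffix on an existing crypt
+remote named "secret" (illustrative):
+
+    rclone config update secret suffix none
+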
Metadata
@@ -25696,11 +26313,11 @@ Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-Backing up a crypted remote
+Backing up an encrypted remote
-If you wish to backup a crypted remote, it is recommended that you use
-rclone sync on the encrypted files, and make sure the passwords are the
-same in the new encrypted remote.
+If you wish to backup an encrypted remote, it is recommended that you
+use rclone sync on the encrypted files, and make sure the passwords are
+the same in the new encrypted remote.
This will have the following advantages
@@ -26523,8 +27140,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
-- batch_mode: async - default batch_timeout is 500ms
-- batch_mode: sync - default batch_timeout is 10s
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
Properties:
@@ -26545,6 +27162,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+--dropbox-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_DROPBOX_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
--dropbox-encoding
The encoding for the backend.
@@ -27667,6 +28295,19 @@ Properties:
- Type: string
- Required: false
+--gcs-user-project
+
+User project.
+
+Optional - needed only for requester pays.
+
+Properties:
+
+- Config: user_project
+- Env Var: RCLONE_GCS_USER_PROJECT
+- Type: string
+- Required: false
+
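+For example, a sketch of listing a requester-pays bucket (project and
+bucket names are illustrative):
+
+    rclone ls --gcs-user-project my-billing-project gcs:requester-pays-bucket
+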
--gcs-service-account-file
Service Account Credentials JSON file path.
@@ -27963,6 +28604,21 @@ Properties:
- Type: string
- Required: false
+--gcs-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is
+created.
+
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with "/" to persist the folder.
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_GCS_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
--gcs-no-check-bucket
If set, don't attempt to check the bucket exists or create it.
@@ -28249,12 +28905,10 @@ There's a few steps we need to go through to accomplish this:
Google Developer Console.
- You must have a project - create one if you don't.
- Then go to "IAM & admin" -> "Service Accounts".
-- Use the "Create Credentials" button. Fill in "Service account name"
- with something that identifies your client. "Role" can be empty.
-- Tick "Furnish a new private key" - select "Key type JSON".
-- Tick "Enable G Suite Domain-wide Delegation". This option makes
- "impersonation" possible, as documented here: Delegating domain-wide
- authority to the service account
+- Use the "Create Service Account" button. Fill in "Service account
+ name" and "Service account ID" with something that identifies your
+ client.
+- Select "Create And Continue". Step 2 and 3 are optional.
- These credentials are what rclone will use for authentication. If
you ever need to remove access, press the "Delete service account
key" button.
@@ -28424,7 +29078,7 @@ like a symlink in unix, except they point to the underlying file data
(e.g. the inode in unix terms) so they don't break if the source is
renamed or moved about.
-Be default rclone treats these as follows.
+By default rclone treats these as follows.
For shortcuts pointing to files:
@@ -29132,6 +29786,8 @@ Properties:
--drive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different drive
configs.
@@ -29275,6 +29931,27 @@ Properties:
- Type: MultiEncoder
- Default: InvalidUtf8
+--drive-env-auth
+
+Get IAM credentials from runtime (environment variables or instance meta
+data if no env vars).
+
+Only applies if service_account_file and service_account_credentials is
+blank.
+
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_DRIVE_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+ - "false"
+ - Enter credentials in the next step.
+ - "true"
+ - Get GCP IAM credentials from the environment (env vars or
+ IAM).
+
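+For example, a sketch assuming rclone runs where GCP IAM credentials
+are available (such as a GCE instance with an attached service account)
+and a remote named "gdrive" exists:
+
+    rclone lsd --drive-env-auth gdrive:
+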
Backend commands
Here are the commands specific to the drive backend.
@@ -30409,7 +31086,7 @@ directory, usually ~/.cache/rclone/kv/. Databases are maintained one per
base backend, named like BaseRemote~hasher.bolt. Checksums for multiple
alias-es into a single base backend will be stored in the single
database. All local paths are treated as aliases into the local backend
-(unless crypted or chunked) and stored in
+(unless encrypted or chunked) and stored in
~/.cache/rclone/kv/local~hasher.bolt. Databases can be shared between
multiple rclone processes.
@@ -30608,7 +31285,7 @@ Properties:
Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
-checks, and wire encryption is required when communicating the the
+checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.
@@ -31884,9 +32561,10 @@ temporarily on disk (in location given by --temp-dir) before it is
uploaded. Small files will be cached in memory - see the
--jottacloud-md5-memory-limit flag. When uploading from local disk the
source checksum is always available, so this does not apply. Starting
-with rclone version 1.52 the same is true for crypted remotes (in older
-versions the crypt backend would not calculate hashes for uploads from
-local disk, so the Jottacloud backend had to do it as described above).
+with rclone version 1.52 the same is true for encrypted remotes (in
+older versions the crypt backend would not calculate hashes for uploads
+from local disk, so the Jottacloud backend had to do it as described
+above).
Restricted filename characters
@@ -33031,7 +33709,7 @@ Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default. Some ISPs throttle
HTTP connections, this causes transfers to become very slow. Enabling
this will force MEGA to use HTTPS for all transfers. HTTPS is normally
-not necesary since all data is already encrypted anyway. Enabling it
+not necessary since all data is already encrypted anyway. Enabling it
will increase CPU usage and add network overhead.
Properties:
@@ -33605,6 +34283,14 @@ It reads configuration from these variables, in the following order:
authenticate to
- AZURE_USERNAME: a username (usually an email address)
- AZURE_PASSWORD: the user's password
+4. Workload Identity
+ - AZURE_TENANT_ID: Tenant to authenticate in.
+ - AZURE_CLIENT_ID: Client ID of the application the user will
+ authenticate to.
+ - AZURE_FEDERATED_TOKEN_FILE: Path to projected service account
+ token file.
+ - AZURE_AUTHORITY_HOST: Authority of an Azure Active Directory
+ endpoint (default: login.microsoftonline.com).
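+
+For example, a sketch with placeholder values:
+
+    export AZURE_TENANT_ID=00000000-0000-0000-0000-000000000000
+    export AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
+    export AZURE_FEDERATED_TOKEN_FILE=/var/run/secrets/azure/tokens/azure-identity-token
+    rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
+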
Env Auth: 2. Managed Service Identity Credentials
@@ -33630,7 +34316,7 @@ Then you could access rclone resources like this:
Or
- rclone lsf --azureblob-env-auth --azureblob-acccount=ACCOUNT :azureblob:CONTAINER
+ rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
Which is analogous to using the az tool:
@@ -34222,6 +34908,24 @@ Properties:
- "container"
- Allow full public read access for container and blob data.
+--azureblob-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is
+created.
+
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with "/" to persist the folder.
+
+This object also has the metadata "hdi_isfolder = true" to conform to
+the Microsoft standard.
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_AZUREBLOB_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
--azureblob-no-check-container
If set, don't attempt to check the container exists or create it.
@@ -34745,6 +35449,8 @@ Properties:
--onedrive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different
onedrive configs.
@@ -34850,7 +35556,7 @@ Properties:
Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
+default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -34885,6 +35591,28 @@ Properties:
- "none"
- None - don't use any hashes
+--onedrive-av-override
+
+Allows download of files the server thinks has a virus.
+
+The onedrive/sharepoint server may check files uploaded with an Anti
+Virus checker. If it detects any potential viruses or malware it will
+block download of the file.
+
+In this case you will see a message like this
+
+ server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+
+If you are 100% sure you want to download this file anyway then use the
+--onedrive-av-override flag, or av_override = true in the config file.
+
+Properties:
+
+- Config: av_override
+- Env Var: RCLONE_ONEDRIVE_AV_OVERRIDE
+- Type: bool
+- Default: false
+
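+For example, a sketch of downloading a blocked file anyway (the path is
+illustrative):
+
+    rclone copy --onedrive-av-override onedrive:quarantined/file.zip /tmp/
+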
--onedrive-encoding
The encoding for the backend.
@@ -35951,8 +36679,7 @@ Properties:
--oos-sse-kms-key-id
-if using using your own master key in vault, this header specifies the
-OCID
+if using your own master key in vault, this header specifies the OCID
(https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)
of a master encryption key used to call the Key Management service to
generate a data encryption key or to encrypt or decrypt a data
@@ -36580,6 +37307,7 @@ that being:
- Memset Memstore
- OVH Object Storage
- Oracle Cloud Storage
+- Blomp Cloud Storage
- IBM Bluemix Cloud ObjectStorage Swift
Paths are specified as remote:container (or remote: for the lsd
@@ -36603,7 +37331,7 @@ This will guide you through an interactive setup process.
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
- XX / OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+ XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
\ "swift"
[snip]
Storage> swift
@@ -36632,6 +37360,8 @@ This will guide you through an interactive setup process.
\ "https://auth.storage.memset.com/v2.0"
6 / OVH
\ "https://auth.cloud.ovh.net/v3"
+ 7 / Blomp Cloud Storage
+ \ "https://authenticate.ain.net"
auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
user_id>
@@ -36805,7 +37535,7 @@ strings.
Standard options
Here are the Standard options specific to swift (OpenStack Swift
-(Rackspace Cloud Files, Memset Memstore, OVH)).
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
--swift-env-auth
@@ -36870,6 +37600,8 @@ Properties:
- Memset Memstore UK v2
- "https://auth.cloud.ovh.net/v3"
- OVH
+ - "https://authenticate.ain.net"
+ - Blomp Cloud Storage
--swift-user-id
@@ -37050,7 +37782,7 @@ Properties:
Advanced options
Here are the Advanced options specific to swift (OpenStack Swift
-(Rackspace Cloud Files, Memset Memstore, OVH)).
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
--swift-leave-parts-on-error
@@ -37470,6 +38202,302 @@ Properties:
- Type: string
- Required: false
+PikPak
+
+PikPak is a private cloud drive.
+
+Paths are specified as remote:path, and may be as deep as required, e.g.
+remote:directory/subdirectory.
+
+Configuration
+
+Here is an example of making a remote for PikPak.
+
+First run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+
+ Enter name for new remote.
+ name> remote
+
+ Option Storage.
+ Type of storage to configure.
+ Choose a number from below, or type in your own value.
+ XX / PikPak
+ \ (pikpak)
+ Storage> XX
+
+ Option user.
+ Pikpak username.
+ Enter a value.
+ user> USERNAME
+
+ Option pass.
+ Pikpak password.
+ Choose an alternative below.
+ y) Yes, type in my own password
+ g) Generate random password
+ y/g> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n>
+
+ Configuration complete.
+ Options:
+ - type: pikpak
+ - user: USERNAME
+ - pass: *** ENCRYPTED ***
+ - token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
+ Keep this "remote" remote?
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+Standard options
+
+Here are the Standard options specific to pikpak (PikPak).
+
+--pikpak-user
+
+Pikpak username.
+
+Properties:
+
+- Config: user
+- Env Var: RCLONE_PIKPAK_USER
+- Type: string
+- Required: true
+
+--pikpak-pass
+
+Pikpak password.
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_PIKPAK_PASS
+- Type: string
+- Required: true
+
+Advanced options
+
+Here are the Advanced options specific to pikpak (PikPak).
+
+--pikpak-client-id
+
+OAuth Client Id.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PIKPAK_CLIENT_ID
+- Type: string
+- Required: false
+
+--pikpak-client-secret
+
+OAuth Client Secret.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+- Type: string
+- Required: false
+
+--pikpak-token
+
+OAuth Access Token as a JSON blob.
+
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PIKPAK_TOKEN
+- Type: string
+- Required: false
+
+--pikpak-auth-url
+
+Auth server URL.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PIKPAK_AUTH_URL
+- Type: string
+- Required: false
+
+--pikpak-token-url
+
+Token server url.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PIKPAK_TOKEN_URL
+- Type: string
+- Required: false
+
+--pikpak-root-folder-id
+
+ID of the root folder. Leave blank normally.
+
+Fill in for rclone to use a non root folder as its starting point.
+
+Properties:
+
+- Config: root_folder_id
+- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+- Type: string
+- Required: false
+
+--pikpak-use-trash
+
+Send files to the trash instead of deleting permanently.
+
+Defaults to true, namely sending files to the trash. Use
+--pikpak-use-trash=false to delete files permanently instead.
+
+Properties:
+
+- Config: use_trash
+- Env Var: RCLONE_PIKPAK_USE_TRASH
+- Type: bool
+- Default: true
+
+--pikpak-trashed-only
+
+Only show files that are in the trash.
+
+This will show trashed files in their original directory structure.
+
+Properties:
+
+- Config: trashed_only
+- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+- Type: bool
+- Default: false
+
+--pikpak-hash-memory-limit
+
+Files bigger than this will be cached on disk to calculate hash if
+required.
+
+Properties:
+
+- Config: hash_memory_limit
+- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+- Type: SizeSuffix
+- Default: 10Mi
+
+--pikpak-encoding
+
+The encoding for the backend.
+
+See the encoding section in the overview for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PIKPAK_ENCODING
+- Type: MultiEncoder
+- Default:
+ Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+
+Backend commands
+
+Here are the commands specific to the pikpak backend.
+
+Run them with
+
+ rclone backend COMMAND remote:
+
+The help below will explain what arguments each command takes.
+
+See the backend command for more info on how to pass options and
+arguments.
+
+These can be run on a running backend using the rc command
+backend/command.
+
+addurl
+
+Add offline download task for url
+
+ rclone backend addurl remote: [options] [+]
+
+This command adds an offline download task for a url.
+
+Usage:
+
+ rclone backend addurl pikpak:dirpath url
+
+Downloads will be stored in 'dirpath'. If 'dirpath' is invalid, the
+download will fall back to the default 'My Pack' folder.
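+
+For example (the URL is illustrative):
+
+    rclone backend addurl pikpak:mydir https://example.com/file.zip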
+
+decompress
+
+Request decompress of a file/files in a folder
+
+ rclone backend decompress remote: [options] [+]
+
+This command requests decompression of a file/files in a folder.
+
+Usage:
+
+ rclone backend decompress pikpak:dirpath {filename} -o password=password
+ rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+
+An optional argument 'filename' can be specified for a file located in
+'pikpak:dirpath'. You may want to pass '-o password=password' for
+password-protected files. Also, pass '-o delete-src-file' to delete
+source files after decompression has finished.
+
+Result:
+
+ {
+ "Decompressed": 17,
+ "SourceDeleted": 0,
+ "Errors": 0
+ }
+
+Limitations
+
+Hashes
+
+PikPak supports the MD5 hash, but it is sometimes empty, especially for
+user-uploaded files.
+
+Deleted files
+
+Deleted files will still be visible with --pikpak-trashed-only even
+after the trash is emptied. This goes away after a few days.
+
premiumize.me
Paths are specified as remote:path
@@ -38963,7 +39991,7 @@ Pass multiple variables space separated, eg
VAR1=value VAR2=value
-and pass variables with spaces in in quotes, eg
+and pass variables with spaces in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
@@ -39034,6 +40062,27 @@ Properties:
- Type: SpaceSepList
- Default:
+--sftp-host-key-algorithms
+
+Space separated list of host key algorithms, ordered by preference.
+
+At least one must match with server configuration. This can be checked
+for example using ssh -Q HostKeyAlgorithms.
+
+Note: This can affect the outcome of key negotiation with the server
+even if server host key validation is not enabled.
+
+Example:
+
+ ssh-ed25519 ssh-rsa ssh-dss
+
+Properties:
+
+- Config: host_key_algorithms
+- Env Var: RCLONE_SFTP_HOST_KEY_ALGORITHMS
+- Type: SpaceSepList
+- Default:
+
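+For example, a sketch restricting rclone to two host key algorithms
+(the remote name is illustrative):
+
+    rclone lsd --sftp-host-key-algorithms "ssh-ed25519 ssh-rsa" sftpremote:
+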
Limitations
On some SFTP servers (e.g. Synology) the paths are different for SSH and
@@ -39081,7 +40130,7 @@ Notes
The first path segment must be the name of the share, which you entered
when you started to share on Windows. On smbd, it's the section title in
-smb.conf (usually in /etc/samba/) file. You can find shares by quering
+smb.conf (usually in /etc/samba/) file. You can find shares by querying
the root if you're unsure (e.g. rclone lsd remote:).
You can't access to the shared printers from rclone, obviously.
@@ -40100,7 +41149,8 @@ To copy a local directory to an Uptobox directory called backup
Modified time and hashes
-Uptobox supports neither modified times nor checksums.
+Uptobox supports neither modified times nor checksums. All timestamps
+will read as that set by --default-time.
Restricted filename characters
@@ -40136,6 +41186,17 @@ Advanced options
Here are the Advanced options specific to uptobox (Uptobox).
+--uptobox-private
+
+Set to make uploaded files private
+
+Properties:
+
+- Config: private
+- Env Var: RCLONE_UPTOBOX_PRIVATE
+- Type: bool
+- Default: false
+
--uptobox-encoding
The encoding for the backend.
@@ -40530,17 +41591,19 @@ This will guide you through an interactive setup process:
url> https://example.com/remote.php/webdav/
Name of the WebDAV site/service/software you are using
Choose a number from below, or type in your own value
- 1 / Nextcloud
- \ "nextcloud"
- 2 / Owncloud
- \ "owncloud"
- 3 / Sharepoint Online, authenticated by Microsoft account.
- \ "sharepoint"
- 4 / Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
- \ "sharepoint-ntlm"
- 5 / Other site/service or software
- \ "other"
- vendor> 1
+ 1 / Fastmail Files
+ \ (fastmail)
+ 2 / Nextcloud
+ \ (nextcloud)
+ 3 / Owncloud
+ \ (owncloud)
+ 4 / Sharepoint Online, authenticated by Microsoft account
+ \ (sharepoint)
+ 5 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
+ \ (sharepoint-ntlm)
+ 6 / Other site/service or software
+ \ (other)
+ vendor> 2
User name
user> user
Password.
@@ -40586,12 +41649,14 @@ To copy a local directory to an WebDAV directory called backup
Modified time and hashes
Plain WebDAV does not support modified times. However when used with
-Owncloud or Nextcloud rclone will support modified times.
+Fastmail Files, Owncloud or Nextcloud rclone will support modified
+times.
Likewise plain WebDAV does not support hashes, however when used with
-Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes. Depending
-on the exact version of Owncloud or Nextcloud hashes may appear on all
-objects, or only on objects which had a hash uploaded with them.
+Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5
+hashes. Depending on the exact version of Owncloud or Nextcloud hashes
+may appear on all objects, or only on objects which had a hash uploaded
+with them.
Standard options
@@ -40621,6 +41686,8 @@ Properties:
- Type: string
- Required: false
- Examples:
+ - "fastmail"
+ - Fastmail Files
- "nextcloud"
- Nextcloud
- "owncloud"
@@ -40725,10 +41792,47 @@ Properties:
- Type: CommaSepList
- Default:
+--webdav-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_WEBDAV_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
+--webdav-nextcloud-chunk-size
+
+Nextcloud upload chunk size.
+
+We recommend configuring your NextCloud instance to increase the max
+chunk size to 1 GB for better upload performance. See
+https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
+
+Set to 0 to disable chunked uploading.
+
+Properties:
+
+- Config: nextcloud_chunk_size
+- Env Var: RCLONE_WEBDAV_NEXTCLOUD_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
+
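+For example, a sketch of uploading with 1 GiB chunks (remote and paths
+are illustrative):
+
+    rclone copy /local/path nextcloud:backup --webdav-nextcloud-chunk-size 1Gi
+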
Provider notes
See below for notes on specific providers.
+Fastmail Files
+
+Use https://webdav.fastmail.com/ or a subdirectory as the URL, and your
+Fastmail email username@domain.tld as the username. Follow this
+documentation to create an app password with access to Files (WebDAV)
+and use this as the password.
+
+Fastmail supports modified times using the X-OC-Mtime header.
+
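+A minimal config sketch (username and password are placeholders; the
+password is the app password, stored obscured by rclone config):
+
+    [fastmail]
+    type = webdav
+    url = https://webdav.fastmail.com/
+    vendor = fastmail
+    user = username@domain.tld
+    pass = OBSCURED_APP_PASSWORD
+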
Owncloud
Click on the settings cog in the bottom right of the page and this will
@@ -41964,6 +43068,271 @@ Options:
Changelog
+v1.63.0 - 2023-06-30
+
+See commits
+
+- New backends
+ - Pikpak (wiserain)
+ - New S3 providers
+ - petabox.io (Andrei Smirnov)
+ - Google Cloud Storage (Anthony Pessy)
+ - New WebDAV providers
+ - Fastmail (Arnavion)
+- Major changes
+ - Files will be copied to a temporary name ending in .partial when
+ copying to local,ftp,sftp then renamed at the end of the
+ transfer. (Janne Hellsten, Nick Craig-Wood)
+ - This helps with data integrity as we don't delete the
+ existing file until the new one is complete.
+ - It can be disabled with the --inplace flag.
+ - This behaviour will also happen if the backend is wrapped,
+ for example sftp wrapped with crypt.
+ - The s3, azureblob and gcs backends now support directory markers
+ so empty directories are supported (Jānis Bebrītis, Nick
+ Craig-Wood)
+ - The --default-time flag now controls the unknown modification
+ time of files/dirs (Nick Craig-Wood)
+ - If a file or directory does not have a modification time
+ rclone can read then rclone will display this fixed time
+ instead.
+ - For the old behaviour use --default-time 0s which will set
+ this time to the time rclone started up.
+- New Features
+ - build
+ - Modernise linters in use and fixup all affected code
+ (albertony)
+ - Push docker beta to GHCR (GitHub container registry)
+ (Richard Tweed)
+ - cat: Add --separator option to cat command (Loren Gordon)
+ - config
+ - Do not remove/overwrite other files during config file save
+ (albertony)
+ - Do not overwrite config file symbolic link (albertony)
+ - Stop config create making invalid config files (Nick
+ Craig-Wood)
+ - doc updates (Adam K, Aditya Basu, albertony, asdffdsazqqq, Damo,
+ danielkrajnik, Dimitri Papadopoulos, dlitster, Drew Parsons,
+ jumbi77, kapitainsky, mac-15, Mariusz Suchodolski, Nick
+ Craig-Wood, NickIAm, Rintze Zelle, Stanislav Gromov, Tareq
+ Sharafy, URenko, yuudi, Zach Kipp)
+ - fs
+ - Add size to JSON logs when moving or copying an object (Nick
+ Craig-Wood)
+ - Allow boolean features to be enabled with --disable !Feature
+ (Nick Craig-Wood)
+ - genautocomplete: Rename to completion with alias to the old name
+ (Nick Craig-Wood)
+ - librclone: Added example on using librclone with Go (alankrit)
+ - lsjson: Make --stat more efficient (Nick Craig-Wood)
+ - operations
+ - Implement --multi-thread-write-buffer-size for speed
+ improvements on downloads (Paulo Schreiner)
+ - Reopen downloads on error when using check --download and
+ cat (Nick Craig-Wood)
+ - rc: config/listremotes includes remotes defined with environment
+ variables (kapitainsky)
+ - selfupdate: Obey --no-check-certificate flag (Nick Craig-Wood)
+ - serve restic: Trigger systemd notify (Shyim)
+ - serve webdav: Implement owncloud checksum and modtime extensions
+ (WeidiDeng)
+ - sync: --suffix-keep-extension preserve 2 part extensions like
+ .tar.gz (Nick Craig-Wood)
+- Bug Fixes
+ - accounting
+ - Fix Prometheus metrics to be the same as core/stats (Nick
+ Craig-Wood)
+ - Bwlimit signal handler should always start (Sam Lai)
+ - bisync: Fix maxDelete parameter being ignored via the rc (Nick
+ Craig-Wood)
+ - cmd/ncdu: Fix screen corruption when logging (eNV25)
+ - filter: Fix deadlock with errors on --files-from (douchen)
+ - fs
+ - Fix interaction between --progress and --interactive (Nick
+ Craig-Wood)
+ - Fix infinite recursive call in pacer ModifyCalculator (fixes
+ issue reported by the staticcheck linter) (albertony)
+ - lib/atexit: Ensure OnError only calls cancel function once (Nick
+ Craig-Wood)
+ - lib/rest: Fix problems re-using HTTP connections (Nick
+ Craig-Wood)
+ - rc
+ - Fix operations/stat with trailing / (Nick Craig-Wood)
+ - Fix missing --rc flags (Nick Craig-Wood)
+ - Fix output of Time values in options/get (Nick Craig-Wood)
+ - serve dlna: Fix potential data race (Nick Craig-Wood)
+ - version: Fix reported os/kernel version for windows (albertony)
+- Mount
+ - Add --mount-case-insensitive to force the mount to be case
+ insensitive (Nick Craig-Wood)
+ - Removed unnecessary byte slice allocation for reads (Anagh Kumar
+ Baranwal)
+ - Clarify rclone mount error when installed via homebrew (Nick
+ Craig-Wood)
+ - Added _netdev to the example mount so it gets treated as a
+ remote-fs rather than local-fs (Anagh Kumar Baranwal)
+- Mount2
+ - Updated go-fuse version (Anagh Kumar Baranwal)
+ - Fixed statfs (Anagh Kumar Baranwal)
+ - Disable xattrs (Anagh Kumar Baranwal)
+- VFS
+ - Add MkdirAll function to make a directory and all beneath (Nick
+ Craig-Wood)
+ - Fix reload: failed to add virtual dir entry: file does not exist
+ (Nick Craig-Wood)
+ - Fix writing to a read only directory creating spurious directory
+ entries (WeidiDeng)
+ - Fix potential data race (Nick Craig-Wood)
+ - Fix backends being Shutdown too early when startup takes a long
+ time (Nick Craig-Wood)
+- Local
+ - Fix filtering of symlinks with -l/--links flag (Nick Craig-Wood)
+ - Fix /path/to/file.rclonelink when -l/--links is in use (Nick
+ Craig-Wood)
+ - Fix crash with --metadata on Android (Nick Craig-Wood)
+- Cache
+ - Fix backends shutting down when in use when used via the rc
+ (Nick Craig-Wood)
+- Crypt
+ - Add --crypt-suffix option to set a custom suffix for encrypted
+ files (jladbrook)
+ - Add --crypt-pass-bad-blocks to allow corrupted file output (Nick
+ Craig-Wood)
+ - Fix reading 0 length files (Nick Craig-Wood)
+ - Try not to return "unexpected EOF" error (Nick Craig-Wood)
+ - Reduce allocations (albertony)
+ - Recommend Dropbox for base32768 encoding (Nick Craig-Wood)
+- Azure Blob
+ - Empty directory markers (Nick Craig-Wood)
+ - Support azure workload identities (Tareq Sharafy)
+ - Fix azure blob uploads with multiple bits of metadata (Nick
+ Craig-Wood)
+ - Fix azurite compatibility by sending nil tier if set to empty
+ string (Roel Arents)
+- Combine
+ - Implement missing methods (Nick Craig-Wood)
+ - Fix goroutine stack overflow on bad object (Nick Craig-Wood)
+- Drive
+ - Add --drive-env-auth to get IAM credentials from runtime (Peter
+ Brunner)
+ - Update drive service account guide (Juang, Yi-Lin)
+ - Fix change notify picking up files outside the root (Nick
+ Craig-Wood)
+    - Fix trailing slash mis-identification of folder as file (Nick
+ Craig-Wood)
+ - Fix incorrect remote after Update on object (Nick Craig-Wood)
+- Dropbox
+ - Implement --dropbox-pacer-min-sleep flag (Nick Craig-Wood)
+ - Fix the dropbox batcher stalling (Misty)
+- Fichier
+    - Add --fichier-cdn option to use the CDN for download (Nick
+ Craig-Wood)
+- FTP
+ - Lower log message priority when SetModTime is not supported to
+ debug (Tobias Gion)
+ - Fix "unsupported LIST line" errors on startup (Nick Craig-Wood)
+ - Fix "501 Not a valid pathname." errors when creating directories
+ (Nick Craig-Wood)
+- Google Cloud Storage
+ - Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ - Added --gcs-user-project needed for requester pays (Christopher
+ Merry)
+- HTTP
+ - Add client certificate user auth middleware. This can auth
+ serve restic from the username in the client cert. (Peter Fern)
+- Jottacloud
+ - Fix vfs writeback stuck in a failed upload loop with file
+ versioning disabled (albertony)
+- Onedrive
+ - Add --onedrive-av-override flag to download files flagged as
+ virus (Nick Craig-Wood)
+ - Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
+ - Report any list errors during rclone cleanup (albertony)
+- Putio
+    - Fix uploading to the wrong object on Update with overridden
+ remote name (Nick Craig-Wood)
+ - Fix modification times not being preserved for server side copy
+ and move (Nick Craig-Wood)
+ - Fix server side copy failures (400 errors) (Nick Craig-Wood)
+- S3
+ - Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ - Update Scaleway storage classes (Brian Starkey)
+ - Fix --s3-versions on individual objects (Nick Craig-Wood)
+    - Fix hang on aborting multipart upload with iDrive e2 (Nick
+ Craig-Wood)
+ - Fix missing "tier" metadata (Nick Craig-Wood)
+ - Fix V3sign: add missing subresource delete (cc)
+ - Fix Arvancloud Domain and region changes and alphabetise the
+ provider (Ehsan Tadayon)
+ - Fix Qiniu KODO quirks virtualHostStyle is false (zzq)
+- SFTP
+ - Add --sftp-host-key-algorithms to allow specifying SSH host key
+ algorithms (Joel)
+ - Fix using --sftp-key-use-agent and --sftp-key-file together
+ needing private key file (Arnav Singh)
+ - Fix move to allow overwriting existing files (Nick Craig-Wood)
+ - Don't stat directories before listing them (Nick Craig-Wood)
+ - Don't check remote points to a file if it ends with / (Nick
+ Craig-Wood)
+- Sharefile
+ - Disable streamed transfers as they no longer work (Nick
+ Craig-Wood)
+- Smb
+ - Code cleanup to avoid overwriting ctx before first use (fixes
+ issue reported by the staticcheck linter) (albertony)
+- Storj
+ - Fix "uplink: too many requests" errors when uploading to the
+ same file (Nick Craig-Wood)
+    - Fix uploading to the wrong object on Update with overridden
+ remote name (Nick Craig-Wood)
+- Swift
+ - Ignore 404 error when deleting an object (Nick Craig-Wood)
+- Union
+ - Implement missing methods (Nick Craig-Wood)
+ - Allow errors to be unwrapped for inspection (Nick Craig-Wood)
+- Uptobox
+ - Add --uptobox-private flag to make all uploaded files private
+ (Nick Craig-Wood)
+ - Fix improper regex (Aaron Gokaslan)
+ - Fix Update returning the wrong object (Nick Craig-Wood)
+ - Fix rmdir declaring that directories weren't empty (Nick
+ Craig-Wood)
+- WebDAV
+ - nextcloud: Add support for chunked uploads (Paul)
+ - Set modtime using propset for owncloud and nextcloud (WeidiDeng)
+ - Make pacer minSleep configurable with --webdav-pacer-min-sleep
+ (ed)
+ - Fix server side copy/move not overwriting (WeidiDeng)
+ - Fix modtime on server side copy for owncloud and nextcloud (Nick
+ Craig-Wood)
+- Yandex
+ - Fix 400 Bad Request on transfer failure (Nick Craig-Wood)
+- Zoho
+ - Fix downloads with Range: header returning the wrong data (Nick
+ Craig-Wood)
+
+v1.62.2 - 2023-03-16
+
+See commits
+
+- Bug Fixes
+ - docker volume plugin: Add missing fuse3 dependency (Nick
+ Craig-Wood)
+ - docs: Fix size documentation (asdffdsazqqq)
+- FTP
+ - Fix 426 errors on downloads with vsftpd (Lesmiscore)
+
+v1.62.1 - 2023-03-15
+
+See commits
+
+- Bug Fixes
+ - docker: Add missing fuse3 dependency (cycneuramus)
+ - build: Update release docs to be more careful with the tag (Nick
+ Craig-Wood)
+ - build: Set Github release to draft while uploading binaries
+ (Nick Craig-Wood)
+
v1.62.0 - 2023-03-14
See commits
@@ -44707,9 +46076,9 @@ See commits
- Use proper import path go.etcd.io/bbolt (Robert-André Mauchin)
- Crypt
- Calculate hashes for uploads from local disk (Nick Craig-Wood)
- - This allows crypted Jottacloud uploads without using local
+ - This allows encrypted Jottacloud uploads without using local
disk
- - This means crypted s3/b2 uploads will now have hashes
+ - This means encrypted s3/b2 uploads will now have hashes
- Added rclone backend decode/encode commands to replicate
functionality of cryptdecode (Anagh Kumar Baranwal)
- Get rid of the unused Cipher interface as it obfuscated the code
@@ -46345,7 +47714,7 @@ v1.42 - 2018-06-16
- Fix panic when running without plex configs (Remus Bunduc)
- Fix root folder caching (Remus Bunduc)
- Crypt
- - Check the crypted hash of files when uploading for extra data
+ - Check the encrypted hash of files when uploading for extra data
security
- Dropbox
- Make Dropbox for business folders accessible using an initial /
@@ -46767,7 +48136,7 @@ v1.38 - 2017-09-30
- New commands
- rcat - read from standard input and stream upload
- tree - shows a nicely formatted recursive listing
- - cryptdecode - decode crypted file names (thanks ishuah)
+ - cryptdecode - decode encrypted file names (thanks ishuah)
- config show - print the config file
- config file - print the config file location
- New Features
@@ -47117,7 +48486,7 @@ v1.34 - 2016-11-06
- Fix rclone move command
- Delete src files which already existed in dst
- Fix deletion of src file when dst file older
- - Fix rclone check on crypted file systems
+ - Fix rclone check on encrypted file systems
- Make failed uploads not count as "Transferred"
- Make sure high level retries show with -q
- Use a vendor directory with godep for repeatable builds
@@ -47953,10 +49322,27 @@ If you are using systemd-resolved (default on Arch Linux), ensure it is
at version 233 or higher. Previous releases contain a bug which causes
not all domains to be resolved properly.
-Additionally with the GODEBUG=netdns= environment variable the Go
-resolver decision can be influenced. This also allows to resolve certain
-issues with DNS resolution. See the name resolution section in the go
-docs.
+The Go resolver decision can be influenced with the GODEBUG=netdns=...
+environment variable. This can also help resolve certain issues with
+DNS resolution. On Windows or macOS systems, try forcing use of the
+internal Go resolver by setting GODEBUG=netdns=go at runtime. On other
+systems (Linux, *BSD, etc.) try forcing use of the system name resolver
+by setting GODEBUG=netdns=cgo (and recompile rclone from source with CGO
+enabled if necessary). See the name resolution section in the go docs.
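+
+For example (a sketch):
+
+    GODEBUG=netdns=go rclone lsd remote: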
+
+Failed to start auth webserver on Windows
+
+ Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+ ...
+ yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+
+This is sometimes caused by the Host Network Service causing issues with
+opening the port on the host.
+
+A simple solution may be restarting the Host Network Service with e.g.
+PowerShell:
+
+ Restart-Service hns
The total size reported in the stats for a sync is wrong and keeps changing
@@ -48032,7 +49418,7 @@ Authors
Contributors
{{< rem
-email addresses removed from here need to be addeed to bin/.ignore-emails to make sure update-authors.py doesn't immediately put them back in again.
+email addresses removed from here need to be added to bin/.ignore-emails to make sure update-authors.py doesn't immediately put them back in again.
>}}
- Alex Couper amcouper@gmail.com
@@ -48612,7 +49998,7 @@ email addresses removed from here need to be addeed to bin/.ignore-emails to mak
- Leroy van Logchem lr.vanlogchem@gmail.com
- Zsolt Ero zsolt.ero@gmail.com
- Lesmiscore nao20010128@gmail.com
-- ehsantdy ehsan.tadayon@arvancloud.com
+- ehsantdy ehsan.tadayon@arvancloud.com ehsantadayon85@gmail.com
- SwazRGB 65694696+swazrgb@users.noreply.github.com
- Mateusz Puczyński mati6095@gmail.com
- Michael C Tiernan - MIT-Research Computing Project mtiernan@mit.edu
@@ -48622,6 +50008,7 @@ email addresses removed from here need to be addeed to bin/.ignore-emails to mak
- Christian Galo 36752715+cgalo5758@users.noreply.github.com
- Erik van Velzen erik@evanv.nl
- Derek Battams derek@battams.ca
+- Paul devnoname120@gmail.com
- SimonLiu simonliu009@users.noreply.github.com
- Hugo Laloge hla@lescompanions.com
- Mr-Kanister 68117355+Mr-Kanister@users.noreply.github.com
@@ -48722,6 +50109,48 @@ email addresses removed from here need to be addeed to bin/.ignore-emails to mak
- Peter Brunner peter@psykhe.com
- Leandro Sacchet leandro.sacchet@animati.com.br
- dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
+- cycneuramus 56681631+cycneuramus@users.noreply.github.com
+- Arnavion me@arnavion.dev
+- Christopher Merry christopher.merry@mlb.com
+- Thibault Coupin thibault.coupin@gmail.com
+- Richard Tweed RichardoC@users.noreply.github.com
+- Zach Kipp Zacho2@users.noreply.github.com
+- yuudi 26199752+yuudi@users.noreply.github.com
+- NickIAm NickIAm@users.noreply.github.com
+- Juang, Yi-Lin frankyjuang@gmail.com
+- jumbi77 jumbi77@users.noreply.github.com
+- Aditya Basu ab.aditya.basu@gmail.com
+- ed s@ocv.me
+- Drew Parsons dparsons@emerall.com
+- Joel joelnb@users.noreply.github.com
+- wiserain mail275@gmail.com
+- Roel Arents roel.arents@kadaster.nl
+- Shyim github@shyim.de
+- Rintze Zelle 78232505+rzelle-lallemand@users.noreply.github.com
+- Damo damoclark@users.noreply.github.com
+- WeidiDeng weidi_deng@icloud.com
+- Brian Starkey stark3y@gmail.com
+- jladbrook jhladbrook@gmail.com
+- Loren Gordon lorengordon@users.noreply.github.com
+- dlitster davidlitster@gmail.com
+- Tobias Gion tobias@gion.io
+- Jānis Bebrītis janis.bebritis@wunder.io
+- Adam K github.com@ak.tidy.email
+- Andrei Smirnov smirnov.captain@gmail.com
+- Janne Hellsten jjhellst@gmail.com
+- cc 12904584+shvc@users.noreply.github.com
+- Tareq Sharafy tareq.sha@gmail.com
+- kapitainsky dariuszb@me.com
+- douchen playgoobug@gmail.com
+- Sam Lai 70988+slai@users.noreply.github.com
+- URenko 18209292+URenko@users.noreply.github.com
+- Stanislav Gromov kullfar@gmail.com
+- Paulo Schreiner paulo.schreiner@delivion.de
+- Mariusz Suchodolski mariusz@suchodol.ski
+- danielkrajnik dan94kra@gmail.com
+- Peter Fern github@0xc0dedbad.com
+- zzq i@zhangzqs.cn
+- mac-15 usman.ilamdin@phpstudios.com
Contact the rclone project
diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md
index 6520bcf9b..bf545a9cc 100644
--- a/docs/content/azureblob.md
+++ b/docs/content/azureblob.md
@@ -792,6 +792,24 @@ Properties:
- "container"
- Allow full public read access for container and blob data.
+#### --azureblob-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with "/" to persist the folder.
+
+This object also has the metadata "hdi_isfolder = true" to conform to
+the Microsoft standard.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_AZUREBLOB_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
#### --azureblob-no-check-container
If set, don't attempt to check the container exists or create it.
diff --git a/docs/content/changelog.md b/docs/content/changelog.md
index 5bdd596c3..306024dca 100644
--- a/docs/content/changelog.md
+++ b/docs/content/changelog.md
@@ -5,6 +5,177 @@ description: "Rclone Changelog"
# Changelog
+## v1.63.0 - 2023-06-30
+
+[See commits](https://github.com/rclone/rclone/compare/v1.62.0...v1.63.0)
+
+* New backends
+ * [Pikpak](/pikpak/) (wiserain)
+ * New S3 providers
+ * [petabox.io](/s3/#petabox) (Andrei Smirnov)
+ * [Google Cloud Storage](/s3/#google-cloud-storage) (Anthony Pessy)
+ * New WebDAV providers
+ * [Fastmail](/webdav/#fastmail-files) (Arnavion)
+* Major changes
+ * Files will be copied to a temporary name ending in `.partial` when copying to `local`,`ftp`,`sftp` then renamed at the end of the transfer. (Janne Hellsten, Nick Craig-Wood)
+ * This helps with data integrity as we don't delete the existing file until the new one is complete.
+ * It can be disabled with the [--inplace](/docs/#inplace) flag.
+ * This behaviour will also happen if the backend is wrapped, for example `sftp` wrapped with `crypt`.
+ * The [s3](/s3/#s3-directory-markers), [azureblob](/azureblob/#azureblob-directory-markers) and [gcs](/googlecloudstorage/#gcs-directory-markers) backends now support directory markers so empty directories are supported (Jānis Bebrītis, Nick Craig-Wood)
+ * The [--default-time](/docs/#default-time-time) flag now controls the unknown modification time of files/dirs (Nick Craig-Wood)
+ * If a file or directory does not have a modification time rclone can read then rclone will display this fixed time instead.
+ * For the old behaviour use `--default-time 0s` which will set this time to the time rclone started up.
+* New Features
+ * build
+ * Modernise linters in use and fixup all affected code (albertony)
+ * Push docker beta to GHCR (GitHub container registry) (Richard Tweed)
+ * cat: Add `--separator` option to cat command (Loren Gordon)
+ * config
+ * Do not remove/overwrite other files during config file save (albertony)
+ * Do not overwrite config file symbolic link (albertony)
+ * Stop `config create` making invalid config files (Nick Craig-Wood)
+ * doc updates (Adam K, Aditya Basu, albertony, asdffdsazqqq, Damo, danielkrajnik, Dimitri Papadopoulos, dlitster, Drew Parsons, jumbi77, kapitainsky, mac-15, Mariusz Suchodolski, Nick Craig-Wood, NickIAm, Rintze Zelle, Stanislav Gromov, Tareq Sharafy, URenko, yuudi, Zach Kipp)
+ * fs
+ * Add `size` to JSON logs when moving or copying an object (Nick Craig-Wood)
+ * Allow boolean features to be enabled with `--disable !Feature` (Nick Craig-Wood)
+ * genautocomplete: Rename to `completion` with alias to the old name (Nick Craig-Wood)
+ * librclone: Added example on using `librclone` with Go (alankrit)
+ * lsjson: Make `--stat` more efficient (Nick Craig-Wood)
+ * operations
+ * Implement `--multi-thread-write-buffer-size` for speed improvements on downloads (Paulo Schreiner)
+ * Reopen downloads on error when using `check --download` and `cat` (Nick Craig-Wood)
+ * rc: `config/listremotes` includes remotes defined with environment variables (kapitainsky)
+ * selfupdate: Obey `--no-check-certificate` flag (Nick Craig-Wood)
+ * serve restic: Trigger systemd notify (Shyim)
+ * serve webdav: Implement owncloud checksum and modtime extensions (WeidiDeng)
+    * sync: `--suffix-keep-extension` preserves 2-part extensions like `.tar.gz` (Nick Craig-Wood)
+* Bug Fixes
+ * accounting
+ * Fix Prometheus metrics to be the same as `core/stats` (Nick Craig-Wood)
+ * Bwlimit signal handler should always start (Sam Lai)
+ * bisync: Fix `maxDelete` parameter being ignored via the rc (Nick Craig-Wood)
+ * cmd/ncdu: Fix screen corruption when logging (eNV25)
+ * filter: Fix deadlock with errors on `--files-from` (douchen)
+ * fs
+ * Fix interaction between `--progress` and `--interactive` (Nick Craig-Wood)
+ * Fix infinite recursive call in pacer ModifyCalculator (fixes issue reported by the staticcheck linter) (albertony)
+ * lib/atexit: Ensure OnError only calls cancel function once (Nick Craig-Wood)
+ * lib/rest: Fix problems re-using HTTP connections (Nick Craig-Wood)
+ * rc
+ * Fix `operations/stat` with trailing `/` (Nick Craig-Wood)
+ * Fix missing `--rc` flags (Nick Craig-Wood)
+ * Fix output of Time values in `options/get` (Nick Craig-Wood)
+ * serve dlna: Fix potential data race (Nick Craig-Wood)
+ * version: Fix reported os/kernel version for windows (albertony)
+* Mount
+ * Add `--mount-case-insensitive` to force the mount to be case insensitive (Nick Craig-Wood)
+ * Removed unnecessary byte slice allocation for reads (Anagh Kumar Baranwal)
+ * Clarify rclone mount error when installed via homebrew (Nick Craig-Wood)
+ * Added _netdev to the example mount so it gets treated as a remote-fs rather than local-fs (Anagh Kumar Baranwal)
+* Mount2
+ * Updated go-fuse version (Anagh Kumar Baranwal)
+ * Fixed statfs (Anagh Kumar Baranwal)
+ * Disable xattrs (Anagh Kumar Baranwal)
+* VFS
+ * Add MkdirAll function to make a directory and all beneath (Nick Craig-Wood)
+ * Fix reload: failed to add virtual dir entry: file does not exist (Nick Craig-Wood)
+ * Fix writing to a read only directory creating spurious directory entries (WeidiDeng)
+ * Fix potential data race (Nick Craig-Wood)
+ * Fix backends being Shutdown too early when startup takes a long time (Nick Craig-Wood)
+* Local
+ * Fix filtering of symlinks with `-l`/`--links` flag (Nick Craig-Wood)
+ * Fix /path/to/file.rclonelink when `-l`/`--links` is in use (Nick Craig-Wood)
+ * Fix crash with `--metadata` on Android (Nick Craig-Wood)
+* Cache
+ * Fix backends shutting down when in use when used via the rc (Nick Craig-Wood)
+* Crypt
+ * Add `--crypt-suffix` option to set a custom suffix for encrypted files (jladbrook)
+ * Add `--crypt-pass-bad-blocks` to allow corrupted file output (Nick Craig-Wood)
+ * Fix reading 0 length files (Nick Craig-Wood)
+ * Try not to return "unexpected EOF" error (Nick Craig-Wood)
+ * Reduce allocations (albertony)
+ * Recommend Dropbox for `base32768` encoding (Nick Craig-Wood)
+* Azure Blob
+ * Empty directory markers (Nick Craig-Wood)
+ * Support azure workload identities (Tareq Sharafy)
+ * Fix azure blob uploads with multiple bits of metadata (Nick Craig-Wood)
+ * Fix azurite compatibility by sending nil tier if set to empty string (Roel Arents)
+* Combine
+ * Implement missing methods (Nick Craig-Wood)
+ * Fix goroutine stack overflow on bad object (Nick Craig-Wood)
+* Drive
+ * Add `--drive-env-auth` to get IAM credentials from runtime (Peter Brunner)
+ * Update drive service account guide (Juang, Yi-Lin)
+ * Fix change notify picking up files outside the root (Nick Craig-Wood)
+    * Fix trailing slash mis-identification of folder as file (Nick Craig-Wood)
+ * Fix incorrect remote after Update on object (Nick Craig-Wood)
+* Dropbox
+ * Implement `--dropbox-pacer-min-sleep` flag (Nick Craig-Wood)
+ * Fix the dropbox batcher stalling (Misty)
+* Fichier
+    * Add `--fichier-cdn` option to use the CDN for download (Nick Craig-Wood)
+* FTP
+ * Lower log message priority when `SetModTime` is not supported to debug (Tobias Gion)
+ * Fix "unsupported LIST line" errors on startup (Nick Craig-Wood)
+ * Fix "501 Not a valid pathname." errors when creating directories (Nick Craig-Wood)
+* Google Cloud Storage
+ * Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ * Added `--gcs-user-project` needed for requester pays (Christopher Merry)
+* HTTP
+ * Add client certificate user auth middleware. This can auth `serve restic` from the username in the client cert. (Peter Fern)
+* Jottacloud
+ * Fix vfs writeback stuck in a failed upload loop with file versioning disabled (albertony)
+* Onedrive
+ * Add `--onedrive-av-override` flag to download files flagged as virus (Nick Craig-Wood)
+ * Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
+ * Report any list errors during `rclone cleanup` (albertony)
+* Putio
+    * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+ * Fix modification times not being preserved for server side copy and move (Nick Craig-Wood)
+ * Fix server side copy failures (400 errors) (Nick Craig-Wood)
+* S3
+ * Empty directory markers (Jānis Bebrītis, Nick Craig-Wood)
+ * Update Scaleway storage classes (Brian Starkey)
+ * Fix `--s3-versions` on individual objects (Nick Craig-Wood)
+    * Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood)
+ * Fix missing "tier" metadata (Nick Craig-Wood)
+ * Fix V3sign: add missing subresource delete (cc)
+ * Fix Arvancloud Domain and region changes and alphabetise the provider (Ehsan Tadayon)
+    * Fix Qiniu KODO quirks: virtualHostStyle is false (zzq)
+* SFTP
+    * Add `--sftp-host-key-algorithms` to allow specifying SSH host key algorithms (Joel)
+ * Fix using `--sftp-key-use-agent` and `--sftp-key-file` together needing private key file (Arnav Singh)
+ * Fix move to allow overwriting existing files (Nick Craig-Wood)
+ * Don't stat directories before listing them (Nick Craig-Wood)
+ * Don't check remote points to a file if it ends with / (Nick Craig-Wood)
+* Sharefile
+ * Disable streamed transfers as they no longer work (Nick Craig-Wood)
+* Smb
+ * Code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) (albertony)
+* Storj
+ * Fix "uplink: too many requests" errors when uploading to the same file (Nick Craig-Wood)
+    * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood)
+* Swift
+ * Ignore 404 error when deleting an object (Nick Craig-Wood)
+* Union
+ * Implement missing methods (Nick Craig-Wood)
+ * Allow errors to be unwrapped for inspection (Nick Craig-Wood)
+* Uptobox
+ * Add `--uptobox-private` flag to make all uploaded files private (Nick Craig-Wood)
+ * Fix improper regex (Aaron Gokaslan)
+ * Fix Update returning the wrong object (Nick Craig-Wood)
+ * Fix rmdir declaring that directories weren't empty (Nick Craig-Wood)
+* WebDAV
+ * nextcloud: Add support for chunked uploads (Paul)
+ * Set modtime using propset for owncloud and nextcloud (WeidiDeng)
+ * Make pacer minSleep configurable with `--webdav-pacer-min-sleep` (ed)
+ * Fix server side copy/move not overwriting (WeidiDeng)
+ * Fix modtime on server side copy for owncloud and nextcloud (Nick Craig-Wood)
+* Yandex
+ * Fix 400 Bad Request on transfer failure (Nick Craig-Wood)
+* Zoho
+ * Fix downloads with `Range:` header returning the wrong data (Nick Craig-Wood)
+
## v1.62.2 - 2023-03-16
[See commits](https://github.com/rclone/rclone/compare/v1.62.1...v1.62.2)
diff --git a/docs/content/commands/rclone.md b/docs/content/commands/rclone.md
index e3a8ad857..46b14e639 100644
--- a/docs/content/commands/rclone.md
+++ b/docs/content/commands/rclone.md
@@ -42,7 +42,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
* [rclone checksum](/commands/rclone_checksum/) - Checks the files in the source against a SUM file.
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
-* [rclone completion](/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping identical files.
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping identical files.
@@ -52,11 +52,10 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone dedupe](/commands/rclone_dedupe/) - Interactively find duplicate filenames and delete/rename them.
* [rclone delete](/commands/rclone_delete/) - Remove the files in path.
* [rclone deletefile](/commands/rclone_deletefile/) - Remove a single file from remote.
-* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
* [rclone hashsum](/commands/rclone_hashsum/) - Produces a hashsum file for all the objects in the path.
* [rclone link](/commands/rclone_link/) - Generate public link to file/folder.
-* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
+* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file and defined in environment variables.
* [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
diff --git a/docs/content/commands/rclone_check.md b/docs/content/commands/rclone_check.md
index 29d89f22c..7dfd8dba5 100644
--- a/docs/content/commands/rclone_check.md
+++ b/docs/content/commands/rclone_check.md
@@ -52,8 +52,9 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
-The default number of parallel checks is N=8. See the [--checkers=N](/docs/#checkers-n) option
-for more information.
+The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
+option for more information.
+
```
rclone check source:path dest:path [flags]
diff --git a/docs/content/commands/rclone_checksum.md b/docs/content/commands/rclone_checksum.md
index 661dab6b2..83be26072 100644
--- a/docs/content/commands/rclone_checksum.md
+++ b/docs/content/commands/rclone_checksum.md
@@ -44,6 +44,9 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
+The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
+option for more information.
+
```
rclone checksum sumfile src:path [flags]
diff --git a/docs/content/commands/rclone_completion.md b/docs/content/commands/rclone_completion.md
index 9193c9868..74f1db8f1 100644
--- a/docs/content/commands/rclone_completion.md
+++ b/docs/content/commands/rclone_completion.md
@@ -1,18 +1,20 @@
---
title: "rclone completion"
-description: "Generate the autocompletion script for the specified shell"
+description: "Output completion script for a given shell."
slug: rclone_completion
url: /commands/rclone_completion/
+versionIntroduced: v1.33
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/ and as part of making a release run "make commanddocs"
---
# rclone completion
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
## Synopsis
-Generate the autocompletion script for rclone for the specified shell.
-See each sub-command's help for details on how to use the generated script.
+
+Generates a shell completion script for rclone.
+Run with `--help` to list the supported shells.
## Options
@@ -26,8 +28,7 @@ See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-* [rclone completion bash](/commands/rclone_completion_bash/) - Generate the autocompletion script for bash
-* [rclone completion fish](/commands/rclone_completion_fish/) - Generate the autocompletion script for fish
-* [rclone completion powershell](/commands/rclone_completion_powershell/) - Generate the autocompletion script for powershell
-* [rclone completion zsh](/commands/rclone_completion_zsh/) - Generate the autocompletion script for zsh
+* [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone.
+* [rclone completion fish](/commands/rclone_completion_fish/) - Output fish completion script for rclone.
+* [rclone completion zsh](/commands/rclone_completion_zsh/) - Output zsh completion script for rclone.
diff --git a/docs/content/commands/rclone_completion_bash.md b/docs/content/commands/rclone_completion_bash.md
index b5772be3e..adb036370 100644
--- a/docs/content/commands/rclone_completion_bash.md
+++ b/docs/content/commands/rclone_completion_bash.md
@@ -1,52 +1,48 @@
---
title: "rclone completion bash"
-description: "Generate the autocompletion script for bash"
+description: "Output bash completion script for rclone."
slug: rclone_completion_bash
url: /commands/rclone_completion_bash/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/bash/ and as part of making a release run "make commanddocs"
---
# rclone completion bash
-Generate the autocompletion script for bash
+Output bash completion script for rclone.
## Synopsis
-Generate the autocompletion script for the bash shell.
-This script depends on the 'bash-completion' package.
-If it is not installed already, you can install it via your OS's package manager.
+Generates a bash shell autocompletion script for rclone.
-To load completions in your current shell session:
+This writes to /etc/bash_completion.d/rclone by default, so it will
+probably need to be run with sudo or as root, e.g.
- source <(rclone completion bash)
+    sudo rclone completion bash
-To load completions for every new session, execute once:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-### Linux:
+ . /etc/bash_completion
- rclone completion bash > /etc/bash_completion.d/rclone
+If you supply a command line argument, the script will be written
+there.
-### macOS:
-
- rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
-
-You will need to start a new shell for this setup to take effect.
+If output_file is "-", then the output will be written to stdout.
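+
+For example, a sketch of writing the script to stdout and installing it
+for the current user only (the target directory is the conventional
+user-level bash-completion location and may need creating first):
+
+    rclone completion bash - > ~/.local/share/bash-completion/completions/rclone
+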
```
-rclone completion bash
+rclone completion bash [output_file] [flags]
```
## Options
```
- -h, --help help for bash
- --no-descriptions disable completion descriptions
+ -h, --help help for bash
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_completion_fish.md b/docs/content/commands/rclone_completion_fish.md
index 5e09dadfb..a3f54c6c9 100644
--- a/docs/content/commands/rclone_completion_fish.md
+++ b/docs/content/commands/rclone_completion_fish.md
@@ -1,43 +1,48 @@
---
title: "rclone completion fish"
-description: "Generate the autocompletion script for fish"
+description: "Output fish completion script for rclone."
slug: rclone_completion_fish
url: /commands/rclone_completion_fish/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/fish/ and as part of making a release run "make commanddocs"
---
# rclone completion fish
-Generate the autocompletion script for fish
+Output fish completion script for rclone.
## Synopsis
-Generate the autocompletion script for the fish shell.
-To load completions in your current shell session:
+Generates a fish autocompletion script for rclone.
- rclone completion fish | source
+This writes to /etc/fish/completions/rclone.fish by default, so it will
+probably need to be run with sudo or as root, e.g.
-To load completions for every new session, execute once:
+    sudo rclone completion fish
- rclone completion fish > ~/.config/fish/completions/rclone.fish
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-You will need to start a new shell for this setup to take effect.
+ . /etc/fish/completions/rclone.fish
+
+If you supply a command line argument, the script will be written
+there.
+
+If output_file is "-", then the output will be written to stdout.
```
-rclone completion fish [flags]
+rclone completion fish [output_file] [flags]
```
## Options
```
- -h, --help help for fish
- --no-descriptions disable completion descriptions
+ -h, --help help for fish
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_completion_powershell.md b/docs/content/commands/rclone_completion_powershell.md
index 8dfbafc45..e52b8130a 100644
--- a/docs/content/commands/rclone_completion_powershell.md
+++ b/docs/content/commands/rclone_completion_powershell.md
@@ -9,7 +9,7 @@ url: /commands/rclone_completion_powershell/
Generate the autocompletion script for powershell
-## Synopsis
+# Synopsis
Generate the autocompletion script for powershell.
@@ -25,7 +25,7 @@ to your powershell profile.
rclone completion powershell [flags]
```
-## Options
+# Options
```
-h, --help help for powershell
@@ -34,7 +34,7 @@ rclone completion powershell [flags]
See the [global flags page](/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone completion](/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
diff --git a/docs/content/commands/rclone_completion_zsh.md b/docs/content/commands/rclone_completion_zsh.md
index 1490817f7..96317551c 100644
--- a/docs/content/commands/rclone_completion_zsh.md
+++ b/docs/content/commands/rclone_completion_zsh.md
@@ -1,54 +1,48 @@
---
title: "rclone completion zsh"
-description: "Generate the autocompletion script for zsh"
+description: "Output zsh completion script for rclone."
slug: rclone_completion_zsh
url: /commands/rclone_completion_zsh/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/zsh/ and as part of making a release run "make commanddocs"
---
# rclone completion zsh
-Generate the autocompletion script for zsh
+Output zsh completion script for rclone.
## Synopsis
-Generate the autocompletion script for the zsh shell.
-If shell completion is not already enabled in your environment you will need
-to enable it. You can execute the following once:
+Generates a zsh autocompletion script for rclone.
- echo "autoload -U compinit; compinit" >> ~/.zshrc
+This writes to /usr/share/zsh/vendor-completions/_rclone by default, so it
+will probably need to be run with sudo or as root, e.g.
-To load completions in your current shell session:
+    sudo rclone completion zsh
- source <(rclone completion zsh); compdef _rclone rclone
+Log out and log in again to use the autocompletion scripts, or source
+them directly
-To load completions for every new session, execute once:
+ autoload -U compinit && compinit
-### Linux:
+If you supply a command line argument, the script will be written
+there.
- rclone completion zsh > "${fpath[1]}/_rclone"
-
-### macOS:
-
- rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
-
-You will need to start a new shell for this setup to take effect.
+If output_file is "-", then the output will be written to stdout.
```
-rclone completion zsh [flags]
+rclone completion zsh [output_file] [flags]
```
## Options
```
- -h, --help help for zsh
- --no-descriptions disable completion descriptions
+ -h, --help help for zsh
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
-* [rclone completion](/commands/rclone_completion/) - Generate the autocompletion script for the specified shell
+* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_cryptcheck.md b/docs/content/commands/rclone_cryptcheck.md
index 31181f101..44c48fd96 100644
--- a/docs/content/commands/rclone_cryptcheck.md
+++ b/docs/content/commands/rclone_cryptcheck.md
@@ -13,16 +13,16 @@ Cryptcheck checks the integrity of an encrypted remote.
## Synopsis
-rclone cryptcheck checks a remote against an [encrypted](/crypt/) remote.
+rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
This is the equivalent of running rclone [check](/commands/rclone_check/),
but able to check the checksums of the encrypted remote.
-For it to work the underlying remote of the encryptedremote must support
+For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
-It works by reading the nonce from each file on the encryptedremote: and
+It works by reading the nonce from each file on the cryptedremote: and
using that to encrypt each file on the remote:. It then checks the
-checksum of the underlying file on the ercryptedremote: against the
+checksum of the underlying file on the cryptedremote: against the
checksum of the file it has just encrypted.
Use it like this
@@ -57,11 +57,12 @@ you what happened to it. These are reminiscent of diff files.
- `* path` means path was present in source and destination but different.
- `! path` means there was an error reading or hashing the source or dest.
-The default number of parallel checks is N=8. See the [--checkers=N](/docs/#checkers-n) option
-for more information.
+The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
+option for more information.
+
```
-rclone cryptcheck remote:path encryptedremote:path [flags]
+rclone cryptcheck remote:path cryptedremote:path [flags]
```
## Options
diff --git a/docs/content/commands/rclone_genautocomplete.md b/docs/content/commands/rclone_genautocomplete.md
index efaa38b07..d9174320f 100644
--- a/docs/content/commands/rclone_genautocomplete.md
+++ b/docs/content/commands/rclone_genautocomplete.md
@@ -10,14 +10,14 @@ versionIntroduced: v1.33
Output completion script for a given shell.
-## Synopsis
+# Synopsis
Generates a shell completion script for rclone.
Run with `--help` to list the supported shells.
-## Options
+# Options
```
-h, --help help for genautocomplete
@@ -25,7 +25,7 @@ Run with `--help` to list the supported shells.
See the [global flags page](/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone genautocomplete bash](/commands/rclone_genautocomplete_bash/) - Output bash completion script for rclone.
diff --git a/docs/content/commands/rclone_genautocomplete_bash.md b/docs/content/commands/rclone_genautocomplete_bash.md
index a771b3af4..8def9b4a9 100644
--- a/docs/content/commands/rclone_genautocomplete_bash.md
+++ b/docs/content/commands/rclone_genautocomplete_bash.md
@@ -9,7 +9,7 @@ url: /commands/rclone_genautocomplete_bash/
Output bash completion script for rclone.
-## Synopsis
+# Synopsis
Generates a bash shell autocompletion script for rclone.
@@ -34,7 +34,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete bash [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for bash
@@ -42,7 +42,7 @@ rclone genautocomplete bash [output_file] [flags]
See the [global flags page](/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_genautocomplete_fish.md b/docs/content/commands/rclone_genautocomplete_fish.md
index 85650f6c8..db124f3ba 100644
--- a/docs/content/commands/rclone_genautocomplete_fish.md
+++ b/docs/content/commands/rclone_genautocomplete_fish.md
@@ -9,7 +9,7 @@ url: /commands/rclone_genautocomplete_fish/
Output fish completion script for rclone.
-## Synopsis
+# Synopsis
Generates a fish autocompletion script for rclone.
@@ -34,7 +34,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete fish [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for fish
@@ -42,7 +42,7 @@ rclone genautocomplete fish [output_file] [flags]
See the [global flags page](/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_genautocomplete_zsh.md b/docs/content/commands/rclone_genautocomplete_zsh.md
index 5da7d7975..33ca51342 100644
--- a/docs/content/commands/rclone_genautocomplete_zsh.md
+++ b/docs/content/commands/rclone_genautocomplete_zsh.md
@@ -9,7 +9,7 @@ url: /commands/rclone_genautocomplete_zsh/
Output zsh completion script for rclone.
-## Synopsis
+# Synopsis
Generates a zsh autocompletion script for rclone.
@@ -34,7 +34,7 @@ If output_file is "-", then the output will be written to stdout.
rclone genautocomplete zsh [output_file] [flags]
```
-## Options
+# Options
```
-h, --help help for zsh
@@ -42,7 +42,7 @@ rclone genautocomplete zsh [output_file] [flags]
See the [global flags page](/flags/) for global options not listed here.
-## SEE ALSO
+# SEE ALSO
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.
diff --git a/docs/content/commands/rclone_listremotes.md b/docs/content/commands/rclone_listremotes.md
index c067b790e..45876c645 100644
--- a/docs/content/commands/rclone_listremotes.md
+++ b/docs/content/commands/rclone_listremotes.md
@@ -8,7 +8,7 @@ versionIntroduced: v1.34
---
# rclone listremotes
-List all the remotes in the config file.
+List all the remotes in the config file and defined in environment variables.
## Synopsis
diff --git a/docs/content/commands/rclone_mount.md b/docs/content/commands/rclone_mount.md
index 37583f110..3599c3844 100644
--- a/docs/content/commands/rclone_mount.md
+++ b/docs/content/commands/rclone_mount.md
@@ -272,6 +272,17 @@ Mounting on macOS can be done either via [macFUSE](https://osxfuse.github.io/)
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.
+### macFUSE Notes
+
+If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
+the website, rclone will locate the macFUSE libraries without any further intervention.
+If, however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
+the following additional steps are required.
+
+ sudo mkdir /usr/local/lib
+ cd /usr/local/lib
+ sudo ln -s /opt/local/lib/libfuse.2.dylib
+
### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are current as
@@ -407,18 +418,20 @@ mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/p
or create systemd mount units:
```
# /etc/systemd/system/mnt-data.mount
+[Unit]
+Description=Mount for /mnt/data
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
-Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
```
optionally accompanied by systemd automount unit
```
# /etc/systemd/system/mnt-data.automount
[Unit]
-Before=remote-fs.target
+Description=AutoMount for /mnt/data
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
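+
+# A sketch of activating the two units above with standard systemd
+# commands (run after creating both files; unit names assumed to
+# match the file names shown):
+#   sudo systemctl daemon-reload
+#   sudo systemctl enable --now mnt-data.automount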
@@ -531,7 +544,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -554,7 +567,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
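+
+For example, a sketch capping the cache at 10 GiB and evicting anything
+unused for two days (the values are illustrative):
+
+    rclone mount remote:path /path/to/mountpoint \
+        --vfs-cache-mode full \
+        --vfs-cache-max-size 10G \
+        --vfs-cache-max-age 48h
+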
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -799,6 +823,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -810,7 +835,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_rcat.md b/docs/content/commands/rclone_rcat.md
index a51eda851..0a32eaebf 100644
--- a/docs/content/commands/rclone_rcat.md
+++ b/docs/content/commands/rclone_rcat.md
@@ -38,10 +38,11 @@ and actually stream it, even if remote backend doesn't support streaming.
size of the stream is different in length to the `--size` passed in
then the transfer will likely fail.
-Note that the upload can also not be retried because the data is
-not kept around until the upload succeeds. If you need to transfer
-a lot of data, you're better off caching locally and then
-`rclone move` it to the destination.
+Note that the upload cannot be retried because the data is not stored.
+If the backend supports multipart uploading then individual chunks can
+be retried. If you need to transfer a lot of data, you may be better
+off caching it locally and then using `rclone move` to transfer it to
+the destination, which can use retries.
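+
+For example, a sketch of streaming a file of known size on a GNU/Linux
+system (passing `--size` lets rclone stream the data even to backends
+without streaming support, as noted above; file names are illustrative):
+
+    cat big.log | rclone rcat --size $(stat -c%s big.log) remote:path/to/big.log
+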
```
rclone rcat remote:path [flags]
diff --git a/docs/content/commands/rclone_rcd.md b/docs/content/commands/rclone_rcd.md
index d179b606a..5bf9121a7 100644
--- a/docs/content/commands/rclone_rcd.md
+++ b/docs/content/commands/rclone_rcd.md
@@ -25,54 +25,54 @@ See the [rc documentation](/rc/) for more info on the rc flags.
## Server options
-Use `--addr` to specify which IP address and port the server should
-listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all
+Use `--rc-addr` to specify which IP address and port the server should
+listen on, eg `--rc-addr 1.2.3.4:8000` or `--rc-addr :8080` to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
-If you set `--addr` to listen on a public or LAN accessible IP address
+If you set `--rc-addr` to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
You can use a unix socket by setting the url to `unix:///path/to/socket`
or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.
-`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+`--rc-addr` may be repeated to listen on multiple IPs/ports/sockets.
-`--server-read-timeout` and `--server-write-timeout` can be used to
+`--rc-server-read-timeout` and `--rc-server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
for a transfer.
-`--max-header-bytes` controls the maximum number of bytes the server will
+`--rc-max-header-bytes` controls the maximum number of bytes the server will
accept in the HTTP header.
-`--baseurl` controls the URL prefix that rclone serves from. By default
-rclone will serve from the root. If you used `--baseurl "/rclone"` then
+`--rc-baseurl` controls the URL prefix that rclone serves from. By default
+rclone will serve from the root. If you used `--rc-baseurl "/rclone"` then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve. Rclone automatically
-inserts leading and trailing "/" on `--baseurl`, so `--baseurl "rclone"`,
-`--baseurl "/rclone"` and `--baseurl "/rclone/"` are all treated
+inserts leading and trailing "/" on `--rc-baseurl`, so `--rc-baseurl "rclone"`,
+`--rc-baseurl "/rclone"` and `--rc-baseurl "/rclone/"` are all treated
identically.
### TLS (SSL)
By default this will serve over http. If you want you can serve over
-https. You will need to supply the `--cert` and `--key` flags.
+https. You will need to supply the `--rc-cert` and `--rc-key` flags.
If you wish to do client side certificate validation then you will need to
-supply `--client-ca` also.
+supply `--rc-client-ca` also.
-`--cert` should be a either a PEM encoded certificate or a concatenation
-of that with the CA certificate. `--key` should be the PEM encoded
-private key and `--client-ca` should be the PEM encoded client
+`--rc-cert` should be either a PEM encoded certificate or a concatenation
+of that with the CA certificate. `--rc-key` should be the PEM encoded
+private key and `--rc-client-ca` should be the PEM encoded client
certificate authority certificate.
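+
+A minimal sketch of serving the remote control API over https (the
+certificate and key file names are illustrative):
+
+    rclone rcd --rc-addr :8443 --rc-cert server.crt --rc-key server.key
+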
---min-tls-version is minimum TLS version that is acceptable. Valid
+`--rc-min-tls-version` is the minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").
### Template
-`--template` allows a user to specify a custom markup template for HTTP
+`--rc-template` allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup
to be used within the template to serve pages:
@@ -100,9 +100,13 @@ to be used within the template to server pages:
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
-set a single username and password with the `--user` and `--pass` flags.
+set a single username and password with the `--rc-user` and `--rc-pass` flags.
-Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--rc-client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
+Use `--rc-htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -114,9 +118,9 @@ To create an htpasswd file:
The password file can be updated while rclone is running.
-Use `--realm` to set the authentication realm.
+Use `--rc-realm` to set the authentication realm.
-Use `--salt` to change the password hashing salt from the default.
+Use `--rc-salt` to change the password hashing salt from the default.
```
diff --git a/docs/content/commands/rclone_serve_dlna.md b/docs/content/commands/rclone_serve_dlna.md
index 00718ef47..896cd523d 100644
--- a/docs/content/commands/rclone_serve_dlna.md
+++ b/docs/content/commands/rclone_serve_dlna.md
@@ -112,7 +112,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -135,7 +135,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -379,7 +390,7 @@ rclone serve dlna remote:path [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_serve_docker.md b/docs/content/commands/rclone_serve_docker.md
index a7d02fc03..b08eb88ce 100644
--- a/docs/content/commands/rclone_serve_docker.md
+++ b/docs/content/commands/rclone_serve_docker.md
@@ -128,7 +128,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -151,7 +151,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -398,6 +409,7 @@ rclone serve docker [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
@@ -412,7 +424,7 @@ rclone serve docker [flags]
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_serve_ftp.md b/docs/content/commands/rclone_serve_ftp.md
index 475e7bb21..e90c6915f 100644
--- a/docs/content/commands/rclone_serve_ftp.md
+++ b/docs/content/commands/rclone_serve_ftp.md
@@ -109,7 +109,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -132,7 +132,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -460,7 +471,7 @@ rclone serve ftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_serve_http.md b/docs/content/commands/rclone_serve_http.md
index 0986a9302..4dd07c6f3 100644
--- a/docs/content/commands/rclone_serve_http.md
+++ b/docs/content/commands/rclone_serve_http.md
@@ -103,6 +103,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -195,7 +199,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -218,7 +222,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -554,7 +569,7 @@ rclone serve http remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_serve_restic.md b/docs/content/commands/rclone_serve_restic.md
index cbd8cd5ac..d5bc88f8d 100644
--- a/docs/content/commands/rclone_serve_restic.md
+++ b/docs/content/commands/rclone_serve_restic.md
@@ -148,6 +148,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
diff --git a/docs/content/commands/rclone_serve_sftp.md b/docs/content/commands/rclone_serve_sftp.md
index 680a42014..f2dfac46f 100644
--- a/docs/content/commands/rclone_serve_sftp.md
+++ b/docs/content/commands/rclone_serve_sftp.md
@@ -141,7 +141,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -164,7 +164,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least accessed files
+from the cache first, starting with the files that have not been
+accessed for the longest time. This cache flushing strategy is
+efficient, and the more relevant files are likely to remain cached.
+
+The `--vfs-cache-max-age` flag will evict files from the cache
+after the set time since last access has passed. With the default
+value of 1 hour, files that have not been accessed for 1 hour are
+evicted. Each time a cached file is accessed, its timer is reset to 0
+and it will be kept for another hour before becoming eligible for
+eviction. Specify the time with standard notation: s, m, h, d, w.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -492,7 +503,7 @@ rclone serve sftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_serve_webdav.md b/docs/content/commands/rclone_serve_webdav.md
index 55bd16069..ac9a0d86d 100644
--- a/docs/content/commands/rclone_serve_webdav.md
+++ b/docs/content/commands/rclone_serve_webdav.md
@@ -132,6 +132,10 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--user` and `--pass` flags.
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
@@ -224,7 +228,7 @@ find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -247,7 +251,18 @@ flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
-evicted from the cache.
+evicted from the cache. When `--vfs-cache-max-size`
+is exceeded, rclone will attempt to evict the least recently accessed
+files from the cache first, i.e. those that have gone the longest without
+being accessed. This eviction strategy means that the most recently used,
+and therefore most likely relevant, files tend to remain cached.
+
+The `--vfs-cache-max-age` flag evicts files from the cache once the set
+time since their last access has passed. With the default value of 1 hour,
+files that have not been accessed for 1 hour become eligible for eviction.
+Accessing a cached file resets its timer to 0, giving it another hour
+before it is evicted. Specify the time with standard duration notation:
+s, m, h, d, w.
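+
+For instance, an illustrative invocation (values are placeholders) relying
+only on access-time based eviction with a 24 hour window:
+
+```
+rclone serve webdav remote:path --vfs-cache-mode writes --vfs-cache-max-age 24h
+```
+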
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
@@ -585,7 +600,7 @@ rclone serve webdav remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
diff --git a/docs/content/commands/rclone_size.md b/docs/content/commands/rclone_size.md
index ad58f6f11..2fbbfea5c 100644
--- a/docs/content/commands/rclone_size.md
+++ b/docs/content/commands/rclone_size.md
@@ -26,7 +26,7 @@ recursion.
Some backends do not always provide file sizes, see for example
[Google Photos](/googlephotos/#size) and
-[Google Drive](/drive/#limitations-of-google-docs).
+[Google Docs](/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.
diff --git a/docs/content/crypt.md b/docs/content/crypt.md
index bb2f58c3a..63ebab273 100644
--- a/docs/content/crypt.md
+++ b/docs/content/crypt.md
@@ -454,7 +454,7 @@ Properties:
- Very simple filename obfuscation.
- "off"
- Don't encrypt the file names.
- - Adds a ".bin" extension only.
+ - Adds a ".bin" extension only, or the one set by --crypt-suffix.
#### --crypt-directory-name-encryption
@@ -509,6 +509,8 @@ Here are the Advanced options specific to crypt (Encrypt/Decrypt a remote).
#### --crypt-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
@@ -562,6 +564,21 @@ Properties:
- "false"
- Encrypt file data.
+#### --crypt-pass-bad-blocks
+
+If set, rclone will pass bad blocks through as a block of zeroes.
+
+This should not be set in normal operation. Only set it when trying to
+recover an encrypted file with errors, where it is desirable to recover
+as much of the file as possible.
+
+Properties:
+
+- Config: pass_bad_blocks
+- Env Var: RCLONE_CRYPT_PASS_BAD_BLOCKS
+- Type: bool
+- Default: false
+
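+As a sketch of such a recovery run (the `secret:` remote and the paths are
+hypothetical):
+
+```
+rclone copy secret:damaged-file /tmp/recovered --crypt-pass-bad-blocks -vv
+```
+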
#### --crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -583,7 +600,21 @@ Properties:
- Encode using base64. Suitable for case sensitive remote.
- "base32768"
- Encode using base32768. Suitable if your remote counts UTF-16 or
- - Unicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)
+ - Unicode codepoints instead of UTF-8 byte length. (E.g. Onedrive, Dropbox)
+
+#### --crypt-suffix
+
+If this is set, it will override the default suffix of ".bin".
+
+Setting suffix to "none" will result in an empty suffix. This may be useful
+when the path length is critical.
+
+Properties:
+
+- Config: suffix
+- Env Var: RCLONE_CRYPT_SUFFIX
+- Type: string
+- Default: ".bin"
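+
+A minimal config sketch (remote names are placeholders, passwords omitted)
+combining unencrypted filenames with a custom suffix:
+
+```
+[secret]
+type = crypt
+remote = remote:encrypted
+filename_encryption = off
+suffix = .enc
+```
+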
### Metadata
diff --git a/docs/content/drive.md b/docs/content/drive.md
index 12d9b0545..dd7e438e1 100644
--- a/docs/content/drive.md
+++ b/docs/content/drive.md
@@ -1066,6 +1066,8 @@ Properties:
#### --drive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
@@ -1215,6 +1217,24 @@ Properties:
- Type: MultiEncoder
- Default: InvalidUtf8
+#### --drive-env-auth
+
+Get IAM credentials from the runtime environment (environment variables,
+or instance metadata if no env vars are set).
+
+Only applies if service_account_file and service_account_credentials are blank.
+
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_DRIVE_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+ - "false"
+ - Enter credentials in the next step.
+ - "true"
+ - Get GCP IAM credentials from the environment (env vars or IAM).
+
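+A hedged example (the `gdrive:` remote is a placeholder) that picks up
+credentials from the environment instead of the config file:
+
+```
+RCLONE_DRIVE_ENV_AUTH=true rclone lsd gdrive:
+```
+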
## Backend commands
Here are the commands specific to the drive backend.
diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md
index cb9af2e1c..0d462eddb 100644
--- a/docs/content/dropbox.md
+++ b/docs/content/dropbox.md
@@ -406,8 +406,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
-- batch_mode: async - default batch_timeout is 500ms
-- batch_mode: sync - default batch_timeout is 10s
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
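+
+For example, an illustrative invocation (paths and values are placeholders)
+that overrides both settings explicitly rather than relying on the defaults:
+
+```
+rclone copy /path/to/files dropbox: --dropbox-batch-mode async \
+    --dropbox-batch-timeout 5s
+```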
@@ -429,6 +429,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+#### --dropbox-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_DROPBOX_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
#### --dropbox-encoding
The encoding for the backend.
diff --git a/docs/content/fichier.md b/docs/content/fichier.md
index e4470faea..a86d72bbb 100644
--- a/docs/content/fichier.md
+++ b/docs/content/fichier.md
@@ -171,6 +171,17 @@ Properties:
- Type: string
- Required: false
+#### --fichier-cdn
+
+Set if you wish to use CDN download links.
+
+Properties:
+
+- Config: cdn
+- Env Var: RCLONE_FICHIER_CDN
+- Type: bool
+- Default: false
+
#### --fichier-encoding
The encoding for the backend.
diff --git a/docs/content/flags.md b/docs/content/flags.md
index 51e9f931f..8fbe43a4a 100644
--- a/docs/content/flags.md
+++ b/docs/content/flags.md
@@ -13,166 +13,169 @@ split into two groups, non backend and backend flags.
These flags are available for every command.
```
- --ask-password Allow prompt for password for encrypted configuration (default true)
- --auto-confirm If enabled, do not request console confirmation
- --backup-dir string Make backups into hierarchy based in DIR
- --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
- --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
- --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --ca-cert stringArray CA certificate used to verify servers
- --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
- --check-first Do all the checks before starting transfers
- --checkers int Number of checkers to run in parallel (default 8)
- -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
- --client-cert string Client SSL certificate (PEM) for mutual TLS auth
- --client-key string Client SSL private key (PEM) for mutual TLS auth
- --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
- --config string Config file (default "$HOME/.config/rclone/rclone.conf")
- --contimeout Duration Connect timeout (default 1m0s)
- --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
- --cpuprofile string Write cpu profile to file
- --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
- --delete-after When synchronizing, delete files on destination after transferring (default)
- --delete-before When synchronizing, delete files on destination before transferring
- --delete-during When synchronizing, delete files during transfer
- --delete-excluded Delete files on dest excluded from sync
- --disable string Disable a comma separated list of features (use --disable help to see a list)
- --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
- --disable-http2 Disable HTTP/2 in the global transport
- -n, --dry-run Do a trial run with no permanent changes
- --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
- --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
- --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
- --dump-headers Dump HTTP headers - may contain sensitive info
- --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
- --fast-list Use recursive list if available; uses more memory but fewer transactions
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
- --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
- --header stringArray Set HTTP header for all transactions
- --header-download stringArray Set HTTP header for download transactions
- --header-upload stringArray Set HTTP header for upload transactions
- --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
- --ignore-case Ignore case in filters (case insensitive)
- --ignore-case-sync Ignore case when synchronizing
- --ignore-checksum Skip post copy check of checksums
- --ignore-errors Delete even if there are I/O errors
- --ignore-existing Skip all files that exist on destination
- --ignore-size Ignore size when skipping use mod-time or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
- --immutable Do not modify files, fail if existing files have been modified
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- -i, --interactive Enable interactive mode
- --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
- --log-file string Log everything to this file
- --log-format string Comma separated list of log format options (default "date,time")
- --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
- --log-systemd Activate systemd integration for the logger
- --low-level-retries int Number of low level retries to do (default 10)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
- --max-delete int When synchronizing, limit the number of deletes (default -1)
- --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
- --max-transfer SizeSuffix Maximum size of data to transfer (default off)
- --memprofile string Write memory profile to file
- -M, --metadata If set, preserve metadata when copying objects
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --metadata-set stringArray Add metadata key=value when uploading
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
- --modify-window Duration Max time diff to be considered the same (default 1ns)
- --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
- --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
- --no-check-certificate Do not verify the server SSL certificate (insecure)
- --no-check-dest Don't check the destination, copy regardless
- --no-console Hide console window (supported on Windows only)
- --no-gzip-encoding Don't set Accept-Encoding: gzip
- --no-traverse Don't traverse destination file system on copy
- --no-unicode-normalization Don't normalize unicode characters in filenames
- --no-update-modtime Don't update destination mod-time if files identical
- --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
- --password-command SpaceSepList Command for supplying password for encrypted configuration
- -P, --progress Show progress during transfer
- --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
- -q, --quiet Print as little stuff as possible
- --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
- --rc-allow-origin string Set the allowed origin for CORS
- --rc-baseurl string Prefix for URLs - leave blank for root
- --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
- --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
- --rc-files string Path to local files to serve on the HTTP server
- --rc-htpasswd string A htpasswd file - if not provided no authentication is done
- --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
- --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
- --rc-key string TLS PEM Private key
- --rc-max-header-bytes int Maximum size of request header (default 4096)
- --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --rc-no-auth Don't require auth for certain methods
- --rc-pass string Password for authentication
- --rc-realm string Realm for authentication
- --rc-salt string Password hashing salt (default "dlPL2MqE")
- --rc-serve Enable the serving of remote objects
- --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --rc-template string User-specified template
- --rc-user string User name for authentication
- --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
- --rc-web-gui Launch WebGUI on localhost
- --rc-web-gui-force-update Force update to latest version of web gui
- --rc-web-gui-no-open-browser Don't open the browser automatically
- --rc-web-gui-update Check and update to latest version of web gui
- --refresh-times Refresh the modtime of remote files
- --retries int Retry operations this many times if they fail (default 3)
- --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
- --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
- --size-only Skip based on size only, not mod-time or checksum
- --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
- --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
- --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
- --stats-one-line Make the stats fit on one line
- --stats-one-line-date Enable --stats-one-line and add current date/time prefix
- --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
- --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
- --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
- --suffix string Suffix to add to changed files
- --suffix-keep-extension Preserve the extension when using --suffix
- --syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
- --temp-dir string Directory rclone will use for temporary files (default "/tmp")
- --timeout Duration IO idle timeout (default 5m0s)
- --tpslimit float Limit HTTP transactions per second to this
- --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
- --track-renames When synchronizing, track file renames and do a server-side move if possible
- --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
- --transfers int Number of file transfers to run in parallel (default 4)
- -u, --update Skip files that are newer on the destination
- --use-cookies Enable session cookiejar
- --use-json-log Use json log format
- --use-mmap Use mmap allocator (see docs)
- --use-server-modtime Use server modified time instead of object metadata
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.62.0")
- -v, --verbose count Print lots more stuff (repeat for more)
+ --ask-password Allow prompt for password for encrypted configuration (default true)
+ --auto-confirm If enabled, do not request console confirmation
+ --backup-dir string Make backups into hierarchy based in DIR
+ --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
+ --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
+ --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --ca-cert stringArray CA certificate used to verify servers
+ --cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
+ --check-first Do all the checks before starting transfers
+ --checkers int Number of checkers to run in parallel (default 8)
+ -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
+ --client-cert string Client SSL certificate (PEM) for mutual TLS auth
+ --client-key string Client SSL private key (PEM) for mutual TLS auth
+ --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default "AUTO")
+ --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ --config string Config file (default "$HOME/.config/rclone/rclone.conf")
+ --contimeout Duration Connect timeout (default 1m0s)
+ --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
+ --cpuprofile string Write cpu profile to file
+ --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
+ --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
+ --delete-after When synchronizing, delete files on destination after transferring (default)
+ --delete-before When synchronizing, delete files on destination before transferring
+ --delete-during When synchronizing, delete files during transfer
+ --delete-excluded Delete files on dest excluded from sync
+ --disable string Disable a comma separated list of features (use --disable help to see a list)
+ --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
+ --disable-http2 Disable HTTP/2 in the global transport
+ -n, --dry-run Do a trial run with no permanent changes
+ --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
+ --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
+ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
+ --dump-headers Dump HTTP headers - may contain sensitive info
+ --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
+ --fast-list Use recursive list if available; uses more memory but fewer transactions
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
+ --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
+ --header stringArray Set HTTP header for all transactions
+ --header-download stringArray Set HTTP header for download transactions
+ --header-upload stringArray Set HTTP header for upload transactions
+ --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
+ --ignore-case Ignore case in filters (case insensitive)
+ --ignore-case-sync Ignore case when synchronizing
+ --ignore-checksum Skip post copy check of checksums
+ --ignore-errors Delete even if there are I/O errors
+ --ignore-existing Skip all files that exist on destination
+ --ignore-size Ignore size when skipping use mod-time or checksum
+ -I, --ignore-times Don't skip files that match size and time - transfer all files
+ --immutable Do not modify files, fail if existing files have been modified
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --inplace Download directly to destination file instead of atomic download to temp/rename
+ -i, --interactive Enable interactive mode
+ --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
+ --log-file string Log everything to this file
+ --log-format string Comma separated list of log format options (default "date,time")
+ --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
+ --log-systemd Activate systemd integration for the logger
+ --low-level-retries int Number of low level retries to do (default 10)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
+ --max-delete int When synchronizing, limit the number of deletes (default -1)
+ --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
+ --max-transfer SizeSuffix Maximum size of data to transfer (default off)
+ --memprofile string Write memory profile to file
+ -M, --metadata If set, preserve metadata when copying objects
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --metadata-set stringArray Add metadata key=value when uploading
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+ --modify-window Duration Max time diff to be considered the same (default 1ns)
+ --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
+ --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
+ --multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
+ --no-check-certificate Do not verify the server SSL certificate (insecure)
+ --no-check-dest Don't check the destination, copy regardless
+ --no-console Hide console window (supported on Windows only)
+ --no-gzip-encoding Don't set Accept-Encoding: gzip
+ --no-traverse Don't traverse destination file system on copy
+ --no-unicode-normalization Don't normalize unicode characters in filenames
+ --no-update-modtime Don't update destination mod-time if files identical
+ --order-by string Instructions on how to order the transfers, e.g. 'size,descending'
+ --password-command SpaceSepList Command for supplying password for encrypted configuration
+ -P, --progress Show progress during transfer
+ --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
+ -q, --quiet Print as little stuff as possible
+ --rc Enable the remote control server
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-allow-origin string Set the allowed origin for CORS
+ --rc-baseurl string Prefix for URLs - leave blank for root
+ --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --rc-client-ca string Client certificate authority to verify clients with
+ --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-files string Path to local files to serve on the HTTP server
+ --rc-htpasswd string A htpasswd file - if not provided no authentication is done
+ --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
+ --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
+ --rc-key string TLS PEM Private key
+ --rc-max-header-bytes int Maximum size of request header (default 4096)
+ --rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --rc-no-auth Don't require auth for certain methods
+ --rc-pass string Password for authentication
+ --rc-realm string Realm for authentication
+ --rc-salt string Password hashing salt (default "dlPL2MqE")
+ --rc-serve Enable the serving of remote objects
+ --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --rc-template string User-specified template
+ --rc-user string User name for authentication
+ --rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
+ --rc-web-gui Launch WebGUI on localhost
+ --rc-web-gui-force-update Force update to latest version of web gui
+ --rc-web-gui-no-open-browser Don't open the browser automatically
+ --rc-web-gui-update Check and update to latest version of web gui
+ --refresh-times Refresh the modtime of remote files
+ --retries int Retry operations this many times if they fail (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
+ --size-only Skip based on size only, not mod-time or checksum
+ --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
+ --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
+ --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
+ --stats-one-line Make the stats fit on one line
+ --stats-one-line-date Enable --stats-one-line and add current date/time prefix
+ --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
+ --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
+ --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+ --suffix string Suffix to add to changed files
+ --suffix-keep-extension Preserve the extension when using --suffix
+ --syslog Use Syslog for logging
+ --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
+ --temp-dir string Directory rclone will use for temporary files (default "/tmp")
+ --timeout Duration IO idle timeout (default 5m0s)
+ --tpslimit float Limit HTTP transactions per second to this
+ --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
+ --track-renames When synchronizing, track file renames and do a server-side move if possible
+ --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
+ --transfers int Number of file transfers to run in parallel (default 4)
+ -u, --update Skip files that are newer on the destination
+ --use-cookies Enable session cookiejar
+ --use-json-log Use json log format
+ --use-mmap Use mmap allocator (see docs)
+ --use-server-modtime Use server modified time instead of object metadata
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
+ -v, --verbose count Print lots more stuff (repeat for more)
```
## Backend Flags
@@ -181,552 +184,579 @@ These flags are available for every command. They control the backends
and may be set in the config file.
```
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
- --alias-remote string Remote or path to alias
- --azureblob-access-tier string Access tier of blob: hot, cool or archive
- --azureblob-account string Azure Storage Account Name
- --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
- --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
- --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
- --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
- --azureblob-client-id string The ID of the client in use
- --azureblob-client-secret string One of the service principal's client secrets
- --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
- --azureblob-disable-checksum Don't store MD5 checksum with object metadata
- --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
- --azureblob-endpoint string Endpoint for the service
- --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
- --azureblob-key string Storage Account Shared Key
- --azureblob-list-chunk int Size of blob list (default 5000)
- --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
- --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-no-check-container If set, don't attempt to check the container exists or create it
- --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
- --azureblob-password string The user's password (obscured)
- --azureblob-public-access string Public access level of a container: blob or container
- --azureblob-sas-url string SAS URL for container level access only
- --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
- --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
- --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
- --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
- --azureblob-use-emulator Uses local storage emulator if provided as 'true'
- --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
- --azureblob-username string User name (usually an email address)
- --b2-account string Account ID or Application Key ID
- --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
- --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
- --b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
- --b2-download-url string Custom endpoint for downloads
- --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --b2-endpoint string Endpoint for the service
- --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
- --b2-key string Application Key
- --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
- --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --b2-version-at Time Show file versions as they were at the specified time (default off)
- --b2-versions Include old versions in directory listings
- --box-access-token string Box App Primary Access Token
- --box-auth-url string Auth server URL
- --box-box-config-file string Box App config.json location
- --box-box-sub-type string (default "user")
- --box-client-id string OAuth Client Id
- --box-client-secret string OAuth Client Secret
- --box-commit-retries int Max number of times to try committing a multipart file (default 100)
- --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
- --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
- --box-owned-by string Only show items owned by the login (email address) passed in
- --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
- --box-token string OAuth Access Token as a JSON blob
- --box-token-url string Token server url
- --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
- --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
- --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
- --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
- --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
- --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
- --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
- --cache-db-purge Clear all the cached data for this remote on start
- --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
- --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
- --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
- --cache-plex-password string The password of the Plex user (obscured)
- --cache-plex-url string The URL of the Plex server
- --cache-plex-username string The username of the Plex user
- --cache-read-retries int How many times to retry a read from a cache storage (default 10)
- --cache-remote string Remote to cache
- --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
- --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
- --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
- --cache-workers int How many workers should run in parallel to download chunks (default 4)
- --cache-writes Cache file data on writes through the FS
- --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
- --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
- --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
- --chunker-remote string Remote to chunk/unchunk
- --combine-upstreams SpaceSepList Upstreams for combining
- --compress-level int GZIP compression level (-2 to 9) (default -1)
- --compress-mode string Compression mode (default "gzip")
- --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
- --compress-remote string Remote to compress
- -L, --copy-links Follow symlinks and copy the pointed to item
- --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
- --crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
- --crypt-filename-encryption string How to encrypt the filenames (default "standard")
- --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
- --crypt-password string Password or pass phrase for encryption (obscured)
- --crypt-password2 string Password or pass phrase for salt (obscured)
- --crypt-remote string Remote to encrypt/decrypt
- --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
- --crypt-show-mapping For all files listed show how the names encrypt
- --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
- --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
- --drive-auth-owner-only Only consider files owned by the authenticated user
- --drive-auth-url string Auth server URL
- --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
- --drive-client-id string Google Application Client Id
- --drive-client-secret string OAuth Client Secret
- --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
- --drive-disable-http2 Disable drive using http2 (default true)
- --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
- --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
- --drive-formats string Deprecated: See export_formats
- --drive-impersonate string Impersonate this user when using a service account
- --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
- --drive-keep-revision-forever Keep new head revision of each file forever
- --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
- --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
- --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
- --drive-resource-key string Resource key for accessing a link-shared file
- --drive-root-folder-id string ID of the root folder
- --drive-scope string Scope that rclone should use when requesting access from drive
- --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
- --drive-service-account-credentials string Service Account Credentials JSON blob
- --drive-service-account-file string Service Account Credentials JSON file path
- --drive-shared-with-me Only show files that are shared with me
- --drive-size-as-quota Show sizes as storage quota usage, not actual size
- --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
- --drive-skip-dangling-shortcuts If set skip dangling shortcut files
- --drive-skip-gdocs Skip google documents in all listings
- --drive-skip-shortcuts If set skip shortcut files
- --drive-starred-only Only show files that are starred
- --drive-stop-on-download-limit Make download limit errors be fatal
- --drive-stop-on-upload-limit Make upload limit errors be fatal
- --drive-team-drive string ID of the Shared Drive (Team Drive)
- --drive-token string OAuth Access Token as a JSON blob
- --drive-token-url string Token server url
- --drive-trashed-only Only show files that are in the trash
- --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
- --drive-use-created-date Use file created date instead of modified date
- --drive-use-shared-date Use date file was shared instead of modified date
- --drive-use-trash Send files to the trash instead of deleting permanently (default true)
- --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off)
- --dropbox-auth-url string Auth server URL
- --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
- --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
- --dropbox-batch-size int Max number of files in upload batch
- --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
- --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
- --dropbox-client-id string OAuth Client Id
- --dropbox-client-secret string OAuth Client Secret
- --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
- --dropbox-impersonate string Impersonate this user when using a business account
- --dropbox-shared-files Instructs rclone to work on individual shared files
- --dropbox-shared-folders Instructs rclone to work on shared folders
- --dropbox-token string OAuth Access Token as a JSON blob
- --dropbox-token-url string Token server url
- --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
- --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
- --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
- --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
- --fichier-shared-folder string If you want to download a shared folder, add this parameter
- --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --filefabric-permanent-token string Permanent Authentication Token
- --filefabric-root-folder-id string ID of the root folder
- --filefabric-token string Session Token
- --filefabric-token-expiry string Token expiry time
- --filefabric-url string URL of the Enterprise File Fabric to connect to
- --filefabric-version string Version read from the file fabric
- --ftp-ask-password Allow asking for FTP password when needed
- --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
- --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
- --ftp-disable-epsv Disable using EPSV even if server advertises support
- --ftp-disable-mlsd Disable using MLSD even if server advertises support
- --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
- --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
- --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
- --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
- --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
- --ftp-host string FTP host to connect to
- --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --ftp-no-check-certificate Do not verify the TLS certificate of the server
- --ftp-pass string FTP password (obscured)
- --ftp-port int FTP port number (default 21)
- --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
- --ftp-tls Use Implicit FTPS (FTP over TLS)
- --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
- --ftp-user string FTP username (default "$USER")
- --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
- --gcs-anonymous Access public buckets and objects without credentials
- --gcs-auth-url string Auth server URL
- --gcs-bucket-acl string Access Control List for new buckets
- --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
- --gcs-client-id string OAuth Client Id
- --gcs-client-secret string OAuth Client Secret
- --gcs-decompress If set this will decompress gzip encoded objects
- --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gcs-endpoint string Endpoint for the service
- --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
- --gcs-location string Location for the newly created buckets
- --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --gcs-object-acl string Access Control List for new objects
- --gcs-project-number string Project number
- --gcs-service-account-file string Service Account Credentials JSON file path
- --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
- --gcs-token string OAuth Access Token as a JSON blob
- --gcs-token-url string Token server url
- --gphotos-auth-url string Auth server URL
- --gphotos-client-id string OAuth Client Id
- --gphotos-client-secret string OAuth Client Secret
- --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gphotos-include-archived Also view and download archived media
- --gphotos-read-only Set to make the Google Photos backend read only
- --gphotos-read-size Set to read the size of media items
- --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
- --gphotos-token string OAuth Access Token as a JSON blob
- --gphotos-token-url string Token server url
- --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
- --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
- --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
- --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
- --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
- --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
- --hdfs-namenode string Hadoop name node and port
- --hdfs-service-principal-name string Kerberos service principal name for the namenode
- --hdfs-username string Hadoop user name
- --hidrive-auth-url string Auth server URL
- --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
- --hidrive-client-id string OAuth Client Id
- --hidrive-client-secret string OAuth Client Secret
- --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
- --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
- --hidrive-root-prefix string The root/parent folder for all paths (default "/")
- --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
- --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
- --hidrive-token string OAuth Access Token as a JSON blob
- --hidrive-token-url string Token server url
- --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
- --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
- --http-headers CommaSepList Set HTTP headers for all transactions
- --http-no-head Don't use HEAD requests
- --http-no-slash Set this if the site doesn't end directories with /
- --http-url string URL of HTTP host to connect to
- --internetarchive-access-key-id string IAS3 Access Key
- --internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
- --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
- --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
- --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
- --internetarchive-secret-access-key string IAS3 Secret Key (password)
- --internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s)
- --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
- --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
- --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
- --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
- --jottacloud-trashed-only Only show files that are in the trash
- --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi)
- --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --koofr-endpoint string The Koofr API endpoint to use
- --koofr-mountid string Mount ID of the mount to use
- --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
- --koofr-provider string Choose your storage provider
- --koofr-setmtime Does the backend support setting modification time (default true)
- --koofr-user string Your user name
- -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
- --local-case-insensitive Force the filesystem to report itself as case insensitive
- --local-case-sensitive Force the filesystem to report itself as case sensitive
- --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --local-no-check-updated Don't check to see if the files change during upload
- --local-no-preallocate Disable preallocation of disk space for transferred files
- --local-no-set-modtime Disable setting modtime
- --local-no-sparse Disable sparse files for multi-thread downloads
- --local-nounc Disable UNC (long path names) conversion on Windows
- --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
- --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
- --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
- --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --mailru-pass string Password (obscured)
- --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
- --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
- --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
- --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
- --mailru-user string User name (usually email)
- --mega-debug Output more debug from Mega
- --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --mega-hard-delete Delete files permanently rather than putting them into the trash
- --mega-pass string Password (obscured)
- --mega-use-https Use HTTPS for transfers
- --mega-user string User name
- --netstorage-account string Set the NetStorage account name
- --netstorage-host string Domain+path of NetStorage host to connect to
- --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
- --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
- -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
- --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
- --onedrive-auth-url string Auth server URL
- --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
- --onedrive-client-id string OAuth Client Id
- --onedrive-client-secret string OAuth Client Secret
- --onedrive-drive-id string The ID of the drive to use
- --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
- --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
- --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
- --onedrive-link-password string Set the password for links created by the link command
- --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
- --onedrive-link-type string Set the type of the links created by the link command (default "view")
- --onedrive-list-chunk int Size of listing chunk (default 1000)
- --onedrive-no-versions Remove all versions on modifying operations
- --onedrive-region string Choose national cloud region for OneDrive (default "global")
- --onedrive-root-folder-id string ID of the root folder
- --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
- --onedrive-token string OAuth Access Token as a JSON blob
- --onedrive-token-url string Token server url
- --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --oos-compartment string Object storage compartment OCID
- --oos-config-file string Path to OCI config file (default "~/.oci/config")
- --oos-config-profile string Profile name inside the oci config file (default "Default")
- --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --oos-copy-timeout Duration Timeout for copy (default 1m0s)
- --oos-disable-checksum Don't store MD5 checksum with object metadata
- --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --oos-endpoint string Endpoint for Object storage API
- --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --oos-namespace string Object storage namespace
- --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --oos-provider string Choose your Auth Provider (default "env_auth")
- --oos-region string Object storage Region
- --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
- --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- --oos-sse-customer-key-sha256 string If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
- --oos-sse-kms-key-id string if using using your own master key in vault, this header specifies the
- --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
- --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
- --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
- --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
- --opendrive-password string Password (obscured)
- --opendrive-username string Username
- --pcloud-auth-url string Auth server URL
- --pcloud-client-id string OAuth Client Id
- --pcloud-client-secret string OAuth Client Secret
- --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
- --pcloud-password string Your pcloud password (obscured)
- --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
- --pcloud-token string OAuth Access Token as a JSON blob
- --pcloud-token-url string Token server url
- --pcloud-username string Your pcloud username
- --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --qingstor-access-key-id string QingStor Access Key ID
- --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
- --qingstor-connection-retries int Number of connection retries (default 3)
- --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
- --qingstor-endpoint string Enter an endpoint URL to connection QingStor API
- --qingstor-env-auth Get QingStor credentials from runtime
- --qingstor-secret-access-key string QingStor Secret Access Key (password)
- --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
- --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --qingstor-zone string Zone to connect to
- --s3-access-key-id string AWS Access Key ID
- --s3-acl string Canned ACL used when creating buckets and storing or copying objects
- --s3-bucket-acl string Canned ACL used when creating buckets
- --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --s3-decompress If set this will decompress gzip encoded objects
- --s3-disable-checksum Don't store MD5 checksum with object metadata
- --s3-disable-http2 Disable usage of http2 for S3 backends
- --s3-download-url string Custom endpoint for downloads
- --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --s3-endpoint string Endpoint for S3 API
- --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
- --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
- --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
- --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
- --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
- --s3-location-constraint string Location constraint - must be set to match the Region
- --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
- --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
- --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
- --s3-no-head If set, don't HEAD uploaded objects to check integrity
- --s3-no-head-object If set, do not do HEAD before GET when getting objects
- --s3-no-system-metadata Suppress setting and reading of system metadata
- --s3-profile string Profile to use in the shared credentials file
- --s3-provider string Choose your S3 provider
- --s3-region string Region to connect to
- --s3-requester-pays Enables requester pays option when interacting with S3 bucket
- --s3-secret-access-key string AWS Secret Access Key (password)
- --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
- --s3-session-token string An AWS session token
- --s3-shared-credentials-file string Path to the shared credentials file
- --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
- --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
- --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
- --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
- --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
- --s3-storage-class string The storage class to use when storing new objects in S3
- --s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
- --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
- --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
- --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
- --s3-v2-auth If true use v2 authentication
- --s3-version-at Time Show file versions as they were at the specified time (default off)
- --s3-versions Include old versions in directory listings
- --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
- --seafile-create-library Should rclone create a library if it doesn't exist
- --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
- --seafile-library string Name of the library
- --seafile-library-key string Library password (for encrypted libraries only) (obscured)
- --seafile-pass string Password (obscured)
- --seafile-url string URL of seafile host to connect to
- --seafile-user string User name (usually email address)
- --sftp-ask-password Allow asking for SFTP password when needed
- --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
- --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
- --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
- --sftp-disable-concurrent-reads If set don't use concurrent reads
- --sftp-disable-concurrent-writes If set don't use concurrent writes
- --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
- --sftp-host string SSH host to connect to
- --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
- --sftp-key-file string Path to PEM-encoded private key file
- --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
- --sftp-key-pem string Raw PEM-encoded private key
- --sftp-key-use-agent When set forces the usage of the ssh-agent
- --sftp-known-hosts-file string Optional path to known_hosts file
- --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
- --sftp-md5sum-command string The command used to read md5 hashes
- --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
- --sftp-path-override string Override path used by SSH shell commands
- --sftp-port int SSH port number (default 22)
- --sftp-pubkey-file string Optional path to public key file
- --sftp-server-command string Specifies the path or command to run a sftp server on the remote host
- --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
- --sftp-set-modtime Set the modified time on the remote if set (default true)
- --sftp-sha1sum-command string The command used to read sha1 hashes
- --sftp-shell-type string The type of SSH shell on remote server, if any
- --sftp-skip-links Set to skip any symlinks and any other non regular files
- --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
- --sftp-use-fstat If set use fstat instead of stat
- --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
- --sftp-user string SSH username (default "$USER")
- --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
- --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --sharefile-endpoint string Endpoint for API calls
- --sharefile-root-folder-id string ID of the root folder
- --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
- --sia-api-password string Sia Daemon API Password (obscured)
- --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
- --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
- --sia-user-agent string Siad User Agent (default "Sia-Agent")
- --skip-links Don't warn about skipped symlinks
- --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
- --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
- --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
- --smb-host string SMB server hostname to connect to
- --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --smb-pass string SMB password (obscured)
- --smb-port int SMB port number (default 445)
- --smb-spn string Service principal name
- --smb-user string SMB username (default "$USER")
- --storj-access-grant string Access grant
- --storj-api-key string API key
- --storj-passphrase string Encryption passphrase
- --storj-provider string Choose an authentication method (default "existing")
- --storj-satellite-address string Satellite address (default "us1.storj.io")
- --sugarsync-access-key-id string Sugarsync Access Key ID
- --sugarsync-app-id string Sugarsync App ID
- --sugarsync-authorization string Sugarsync authorization
- --sugarsync-authorization-expiry string Sugarsync authorization expiry
- --sugarsync-deleted-id string Sugarsync deleted folder id
- --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
- --sugarsync-hard-delete Permanently delete files if true
- --sugarsync-private-access-key string Sugarsync Private Access Key
- --sugarsync-refresh-token string Sugarsync refresh token
- --sugarsync-root-id string Sugarsync root id
- --sugarsync-user string Sugarsync user
- --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
- --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
- --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
- --swift-auth string Authentication URL for server (OS_AUTH_URL)
- --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
- --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
- --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
- --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
- --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
- --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
- --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
- --swift-key string API key or password (OS_PASSWORD)
- --swift-leave-parts-on-error If true avoid calling abort upload on a failure
- --swift-no-chunk Don't chunk files during streaming upload
- --swift-no-large-objects Disable support for static and dynamic large objects
- --swift-region string Region name - optional (OS_REGION_NAME)
- --swift-storage-policy string The storage policy to use when creating a new container
- --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
- --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
- --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
- --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
- --swift-user string User name to log in (OS_USERNAME)
- --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
- --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
- --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
- --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
- --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
- --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
- --union-upstreams string List of space separated upstreams
- --uptobox-access-token string Your access token
- --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
- --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
- --webdav-bearer-token-command string Command to run to get a bearer token
- --webdav-encoding string The encoding for the backend
- --webdav-headers CommaSepList Set HTTP headers for all transactions
- --webdav-pass string Password (obscured)
- --webdav-url string URL of http host to connect to
- --webdav-user string User name
- --webdav-vendor string Name of the WebDAV site/service/software you are using
- --yandex-auth-url string Auth server URL
- --yandex-client-id string OAuth Client Id
- --yandex-client-secret string OAuth Client Secret
- --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --yandex-hard-delete Delete files permanently rather than putting them into the trash
- --yandex-token string OAuth Access Token as a JSON blob
- --yandex-token-url string Token server url
- --zoho-auth-url string Auth server URL
- --zoho-client-id string OAuth Client Id
- --zoho-client-secret string OAuth Client Secret
- --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
- --zoho-region string Zoho region to connect to
- --zoho-token string OAuth Access Token as a JSON blob
- --zoho-token-url string Token server url
+ --acd-auth-url string Auth server URL
+ --acd-client-id string OAuth Client Id
+ --acd-client-secret string OAuth Client Secret
+ --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
+ --acd-token string OAuth Access Token as a JSON blob
+ --acd-token-url string Token server url
+ --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-remote string Remote or path to alias
+ --azureblob-access-tier string Access tier of blob: hot, cool or archive
+ --azureblob-account string Azure Storage Account Name
+ --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
+ --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
+ --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
+ --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
+ --azureblob-client-id string The ID of the client in use
+ --azureblob-client-secret string One of the service principal's client secrets
+ --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --azureblob-disable-checksum Don't store MD5 checksum with object metadata
+ --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+ --azureblob-endpoint string Endpoint for the service
+ --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
+ --azureblob-key string Storage Account Shared Key
+ --azureblob-list-chunk int Size of blob list (default 5000)
+ --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
+ --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-no-check-container If set, don't attempt to check the container exists or create it
+ --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
+ --azureblob-password string The user's password (obscured)
+ --azureblob-public-access string Public access level of a container: blob or container
+ --azureblob-sas-url string SAS URL for container level access only
+ --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
+ --azureblob-tenant string ID of the service principal's tenant. Also called its directory ID
+ --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
+ --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
+ --azureblob-use-emulator Use the local storage emulator if set to 'true'
+ --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
+ --azureblob-username string User name (usually an email address)
+ --b2-account string Account ID or Application Key ID
+ --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
+ --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-disable-checksum Disable checksums for large (> upload cutoff) files
+ --b2-download-auth-duration Duration Time before the authorization token will expire, in seconds or with suffix ms|s|m|h|d (default 1w)
+ --b2-download-url string Custom endpoint for downloads
+ --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --b2-endpoint string Endpoint for the service
+ --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
+ --b2-key string Application Key
+ --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
+ --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --b2-version-at Time Show file versions as they were at the specified time (default off)
+ --b2-versions Include old versions in directory listings
+ --box-access-token string Box App Primary Access Token
+ --box-auth-url string Auth server URL
+ --box-box-config-file string Box App config.json location
+ --box-box-sub-type string (default "user")
+ --box-client-id string OAuth Client Id
+ --box-client-secret string OAuth Client Secret
+ --box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+ --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
+ --box-owned-by string Only show items owned by the login (email address) passed in
+ --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
+ --box-token string OAuth Access Token as a JSON blob
+ --box-token-url string Token server url
+ --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+ --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
+ --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
+ --cache-chunk-path string Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
+ --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
+ --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
+ --cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
+ --cache-db-purge Clear all the cached data for this remote on start
+ --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
+ --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
+ --cache-plex-password string The password of the Plex user (obscured)
+ --cache-plex-url string The URL of the Plex server
+ --cache-plex-username string The username of the Plex user
+ --cache-read-retries int How many times to retry a read from a cache storage (default 10)
+ --cache-remote string Remote to cache
+ --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
+ --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
+ --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
+ --cache-workers int How many workers should run in parallel to download chunks (default 4)
+ --cache-writes Cache file data on writes through the FS
+ --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
+ --chunker-hash-type string Choose how chunker handles hash sums (default "md5")
+ --chunker-remote string Remote to chunk/unchunk
+ --combine-upstreams SpaceSepList Upstreams for combining
+ --compress-level int GZIP compression level (-2 to 9) (default -1)
+ --compress-mode string Compression mode (default "gzip")
+ --compress-ram-cache-limit SizeSuffix Cache compressed files of unknown size in RAM up to this limit (larger files are cached on disk) (default 20Mi)
+ --compress-remote string Remote to compress
+ -L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
+ --crypt-filename-encoding string How to encode the encrypted filename as a text string (default "base32")
+ --crypt-filename-encryption string How to encrypt the filenames (default "standard")
+ --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
+ --crypt-pass-bad-blocks If set this will pass bad blocks through as all 0
+ --crypt-password string Password or pass phrase for encryption (obscured)
+ --crypt-password2 string Password or pass phrase for salt (obscured)
+ --crypt-remote string Remote to encrypt/decrypt
+ --crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-suffix string If set, overrides the default suffix of ".bin" (default ".bin")
+ --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+ --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
+ --drive-auth-owner-only Only consider files owned by the authenticated user
+ --drive-auth-url string Auth server URL
+ --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
+ --drive-client-id string Google Application Client Id
+ --drive-client-secret string OAuth Client Secret
+ --drive-copy-shortcut-content Server-side copy the contents of shortcuts instead of the shortcut
+ --drive-disable-http2 Disable drive using http2 (default true)
+ --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
+ --drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
+ --drive-formats string Deprecated: See export_formats
+ --drive-impersonate string Impersonate this user when using a service account
+ --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
+ --drive-keep-revision-forever Keep new head revision of each file forever
+ --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
+ --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
+ --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
+ --drive-resource-key string Resource key for accessing a link-shared file
+ --drive-root-folder-id string ID of the root folder
+ --drive-scope string Scope that rclone should use when requesting access from drive
+ --drive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --drive-service-account-credentials string Service Account Credentials JSON blob
+ --drive-service-account-file string Service Account Credentials JSON file path
+ --drive-shared-with-me Only show files that are shared with me
+ --drive-size-as-quota Show sizes as storage quota usage, not actual size
+ --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
+ --drive-skip-dangling-shortcuts If set skip dangling shortcut files
+ --drive-skip-gdocs Skip google documents in all listings
+ --drive-skip-shortcuts If set skip shortcut files
+ --drive-starred-only Only show files that are starred
+ --drive-stop-on-download-limit Make download limit errors fatal
+ --drive-stop-on-upload-limit Make upload limit errors fatal
+ --drive-team-drive string ID of the Shared Drive (Team Drive)
+ --drive-token string OAuth Access Token as a JSON blob
+ --drive-token-url string Token server url
+ --drive-trashed-only Only show files that are in the trash
+ --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
+ --drive-use-created-date Use file created date instead of modified date
+ --drive-use-shared-date Use date file was shared instead of modified date
+ --drive-use-trash Send files to the trash instead of deleting permanently (default true)
+ --drive-v2-download-min-size SizeSuffix If objects are larger than this, use the drive v2 API to download (default off)
+ --dropbox-auth-url string Auth server URL
+ --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
+ --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
+ --dropbox-batch-size int Max number of files in upload batch
+ --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
+ --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
+ --dropbox-client-id string OAuth Client Id
+ --dropbox-client-secret string OAuth Client Secret
+ --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+ --dropbox-impersonate string Impersonate this user when using a business account
+ --dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --dropbox-shared-files Instructs rclone to work on individual shared files
+ --dropbox-shared-folders Instructs rclone to work on shared folders
+ --dropbox-token string OAuth Access Token as a JSON blob
+ --dropbox-token-url string Token server url
+ --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
+ --fichier-cdn Set if you wish to use CDN download links
+ --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+ --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
+ --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
+ --fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --filefabric-permanent-token string Permanent Authentication Token
+ --filefabric-root-folder-id string ID of the root folder
+ --filefabric-token string Session Token
+ --filefabric-token-expiry string Token expiry time
+ --filefabric-url string URL of the Enterprise File Fabric to connect to
+ --filefabric-version string Version read from the file fabric
+ --ftp-ask-password Allow asking for FTP password when needed
+ --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
+ --ftp-concurrency int Maximum number of simultaneous FTP connections, 0 for unlimited
+ --ftp-disable-epsv Disable using EPSV even if server advertises support
+ --ftp-disable-mlsd Disable using MLSD even if server advertises support
+ --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
+ --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
+ --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
+ --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
+ --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
+ --ftp-host string FTP host to connect to
+ --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --ftp-no-check-certificate Do not verify the TLS certificate of the server
+ --ftp-pass string FTP password (obscured)
+ --ftp-port int FTP port number (default 21)
+ --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
+ --ftp-tls Use Implicit FTPS (FTP over TLS)
+ --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
+ --ftp-user string FTP username (default "$USER")
+ --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
+ --gcs-anonymous Access public buckets and objects without credentials
+ --gcs-auth-url string Auth server URL
+ --gcs-bucket-acl string Access Control List for new buckets
+ --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
+ --gcs-client-id string OAuth Client Id
+ --gcs-client-secret string OAuth Client Secret
+ --gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gcs-endpoint string Endpoint for the service
+ --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --gcs-location string Location for the newly created buckets
+ --gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --gcs-object-acl string Access Control List for new objects
+ --gcs-project-number string Project number
+ --gcs-service-account-file string Service Account Credentials JSON file path
+ --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
+ --gcs-token string OAuth Access Token as a JSON blob
+ --gcs-token-url string Token server url
+ --gcs-user-project string User project
+ --gphotos-auth-url string Auth server URL
+ --gphotos-client-id string OAuth Client Id
+ --gphotos-client-secret string OAuth Client Secret
+ --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gphotos-include-archived Also view and download archived media
+ --gphotos-read-only Set to make the Google Photos backend read only
+ --gphotos-read-size Set to read the size of media items
+ --gphotos-start-year int Limit the photos downloaded to those uploaded after the given year (default 2000)
+ --gphotos-token string OAuth Access Token as a JSON blob
+ --gphotos-token-url string Token server url
+ --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
+ --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+ --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
+ --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+ --hdfs-namenode string Hadoop name node and port
+ --hdfs-service-principal-name string Kerberos service principal name for the namenode
+ --hdfs-username string Hadoop user name
+ --hidrive-auth-url string Auth server URL
+ --hidrive-chunk-size SizeSuffix Chunk size for chunked uploads (default 48Mi)
+ --hidrive-client-id string OAuth Client Id
+ --hidrive-client-secret string OAuth Client Secret
+ --hidrive-disable-fetching-member-count Do not fetch the number of objects in directories unless it is absolutely necessary
+ --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
+ --hidrive-root-prefix string The root/parent folder for all paths (default "/")
+ --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default "rw")
+ --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default "user")
+ --hidrive-token string OAuth Access Token as a JSON blob
+ --hidrive-token-url string Token server url
+ --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
+ --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-headers CommaSepList Set HTTP headers for all transactions
+ --http-no-head Don't use HEAD requests
+ --http-no-slash Set this if the site doesn't end directories with /
+ --http-url string URL of HTTP host to connect to
+ --internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-disable-checksum Don't ask the server to test against the MD5 checksum calculated by rclone (default true)
+ --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
+ --internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
+ --internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
+ --internetarchive-secret-access-key string IAS3 Secret Key (password)
+ --internetarchive-wait-archive Duration Timeout to wait for the server's processing tasks (specifically archive and book_op) to finish (default 0s)
+ --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+ --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
+ --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+ --jottacloud-no-versions Avoid server side versioning by deleting and recreating files instead of overwriting them
+ --jottacloud-trashed-only Only show files that are in the trash
+ --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --koofr-endpoint string The Koofr API endpoint to use
+ --koofr-mountid string Mount ID of the mount to use
+ --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
+ --koofr-provider string Choose your storage provider
+ --koofr-setmtime Does the backend support setting modification time (default true)
+ --koofr-user string Your user name
+ -l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
+ --local-case-insensitive Force the filesystem to report itself as case insensitive
+ --local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --local-no-check-updated Don't check to see if the files change during upload
+ --local-no-preallocate Disable preallocation of disk space for transferred files
+ --local-no-set-modtime Disable setting modtime
+ --local-no-sparse Disable sparse files for multi-thread downloads
+ --local-nounc Disable UNC (long path names) conversion on Windows
+ --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
+ --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
+ --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
+ --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --mailru-pass string Password (obscured)
+ --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
+ --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
+ --mailru-speedup-max-disk SizeSuffix Disable speedup (put by hash) for files larger than this size (default 3Gi)
+ --mailru-speedup-max-memory SizeSuffix Files larger than this size will always be hashed on disk (default 32Mi)
+ --mailru-user string User name (usually email)
+ --mega-debug Output more debug from Mega
+ --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --mega-hard-delete Delete files permanently rather than putting them into the trash
+ --mega-pass string Password (obscured)
+ --mega-use-https Use HTTPS for transfers
+ --mega-user string User name
+ --netstorage-account string Set the NetStorage account name
+ --netstorage-host string Domain+path of NetStorage host to connect to
+ --netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
+ --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
+ -x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
+ --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
+ --onedrive-auth-url string Auth server URL
+ --onedrive-av-override Allows download of files the server thinks have a virus
+ --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
+ --onedrive-client-id string OAuth Client Id
+ --onedrive-client-secret string OAuth Client Secret
+ --onedrive-drive-id string The ID of the drive to use
+ --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
+ --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
+ --onedrive-hash-type string Specify the hash in use for the backend (default "auto")
+ --onedrive-link-password string Set the password for links created by the link command
+ --onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
+ --onedrive-link-type string Set the type of the links created by the link command (default "view")
+ --onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-no-versions Remove all versions on modifying operations
+ --onedrive-region string Choose national cloud region for OneDrive (default "global")
+ --onedrive-root-folder-id string ID of the root folder
+ --onedrive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --onedrive-token string OAuth Access Token as a JSON blob
+ --onedrive-token-url string Token server url
+ --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --oos-compartment string Object storage compartment OCID
+ --oos-config-file string Path to OCI config file (default "~/.oci/config")
+ --oos-config-profile string Profile name inside the oci config file (default "Default")
+ --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-disable-checksum Don't store MD5 checksum with object metadata
+ --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --oos-endpoint string Endpoint for Object storage API
+ --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery
+ --oos-namespace string Object storage namespace
+ --oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --oos-provider string Choose your Auth Provider (default "env_auth")
+ --oos-region string Object storage Region
+ --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
+ --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
+ --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
+ --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
+ --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the
+ --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
+ --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
+ --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+ --opendrive-password string Password (obscured)
+ --opendrive-username string Username
+ --pcloud-auth-url string Auth server URL
+ --pcloud-client-id string OAuth Client Id
+ --pcloud-client-secret string OAuth Client Secret
+ --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
+ --pcloud-password string Your pcloud password (obscured)
+ --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
+ --pcloud-token string OAuth Access Token as a JSON blob
+ --pcloud-token-url string Token server url
+ --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
+ --pikpak-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
+ --pikpak-pass string Pikpak password (obscured)
+ --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
+ --pikpak-trashed-only Only show files that are in the trash
+ --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
+ --pikpak-user string Pikpak username
+ --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --qingstor-access-key-id string QingStor Access Key ID
+ --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
+ --qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
+ --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
+ --qingstor-env-auth Get QingStor credentials from runtime
+ --qingstor-secret-access-key string QingStor Secret Access Key (password)
+ --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
+ --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --qingstor-zone string Zone to connect to
+ --s3-access-key-id string AWS Access Key ID
+ --s3-acl string Canned ACL used when creating buckets and storing or copying objects
+ --s3-bucket-acl string Canned ACL used when creating buckets
+ --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --s3-decompress If set this will decompress gzip encoded objects
+ --s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --s3-disable-checksum Don't store MD5 checksum with object metadata
+ --s3-disable-http2 Disable usage of http2 for S3 backends
+ --s3-download-url string Custom endpoint for downloads
+ --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --s3-endpoint string Endpoint for S3 API
+ --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
+ --s3-force-path-style If true use path style access; if false use virtual hosted style (default true)
+ --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+ --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
+ --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
+ --s3-location-constraint string Location constraint - must be set to match the Region
+ --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
+ --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
+ --s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
+ --s3-no-head If set, don't HEAD uploaded objects to check integrity
+ --s3-no-head-object If set, do not do HEAD before GET when getting objects
+ --s3-no-system-metadata Suppress setting and reading of system metadata
+ --s3-profile string Profile to use in the shared credentials file
+ --s3-provider string Choose your S3 provider
+ --s3-region string Region to connect to
+ --s3-requester-pays Enables the requester pays option when interacting with an S3 bucket
+ --s3-secret-access-key string AWS Secret Access Key (password)
+ --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
+ --s3-session-token string An AWS session token
+ --s3-shared-credentials-file string Path to the shared credentials file
+ --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+ --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
+ --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
+ --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+ --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the key
+ --s3-storage-class string The storage class to use when storing new objects in S3
+ --s3-sts-endpoint string Endpoint for STS
+ --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
+ --s3-use-accept-encoding-gzip Tristate Whether to send Accept-Encoding: gzip header (default unset)
+ --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
+ --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
+ --s3-v2-auth If true use v2 authentication
+ --s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-versions Include old versions in directory listings
+ --seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
+ --seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+ --seafile-library string Name of the library
+ --seafile-library-key string Library password (for encrypted libraries only) (obscured)
+ --seafile-pass string Password (obscured)
+ --seafile-url string URL of seafile host to connect to
+ --seafile-user string User name (usually email address)
+ --sftp-ask-password Allow asking for SFTP password when needed
+ --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
+ --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
+ --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
+ --sftp-disable-concurrent-reads If set don't use concurrent reads
+ --sftp-disable-concurrent-writes If set don't use concurrent writes
+ --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
+ --sftp-host string SSH host to connect to
+ --sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
+ --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
+ --sftp-key-file string Path to PEM-encoded private key file
+ --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
+ --sftp-key-pem string Raw PEM-encoded private key
+ --sftp-key-use-agent When set, forces the use of the ssh-agent
+ --sftp-known-hosts-file string Optional path to known_hosts file
+ --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
+ --sftp-md5sum-command string The command used to read md5 hashes
+ --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
+ --sftp-path-override string Override path used by SSH shell commands
+ --sftp-port int SSH port number (default 22)
+ --sftp-pubkey-file string Optional path to public key file
+ --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
+ --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
+ --sftp-set-modtime Set the modified time on the remote if set (default true)
+ --sftp-sha1sum-command string The command used to read sha1 hashes
+ --sftp-shell-type string The type of SSH shell on remote server, if any
+ --sftp-skip-links Set to skip any symlinks and any other non regular files
+ --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
+ --sftp-use-fstat If set use fstat instead of stat
+ --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
+ --sftp-user string SSH username (default "$USER")
+ --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
+ --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --sharefile-endpoint string Endpoint for API calls
+ --sharefile-root-folder-id string ID of the root folder
+ --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
+ --sia-api-password string Sia Daemon API Password (obscured)
+ --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
+ --sia-user-agent string Siad User Agent (default "Sia-Agent")
+ --skip-links Don't warn about skipped symlinks
+ --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
+ --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
+ --smb-host string SMB server hostname to connect to
+ --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --smb-pass string SMB password (obscured)
+ --smb-port int SMB port number (default 445)
+ --smb-spn string Service principal name
+ --smb-user string SMB username (default "$USER")
+ --storj-access-grant string Access grant
+ --storj-api-key string API key
+ --storj-passphrase string Encryption passphrase
+ --storj-provider string Choose an authentication method (default "existing")
+ --storj-satellite-address string Satellite address (default "us1.storj.io")
+ --sugarsync-access-key-id string Sugarsync Access Key ID
+ --sugarsync-app-id string Sugarsync App ID
+ --sugarsync-authorization string Sugarsync authorization
+ --sugarsync-authorization-expiry string Sugarsync authorization expiry
+ --sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
+ --sugarsync-hard-delete Permanently delete files if true
+ --sugarsync-private-access-key string Sugarsync Private Access Key
+ --sugarsync-refresh-token string Sugarsync refresh token
+ --sugarsync-root-id string Sugarsync root id
+ --sugarsync-user string Sugarsync user
+ --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
+ --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
+ --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
+ --swift-auth string Authentication URL for server (OS_AUTH_URL)
+ --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
+ --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
+ --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
+ --swift-key string API key or password (OS_PASSWORD)
+ --swift-leave-parts-on-error If true avoid calling abort upload on a failure
+ --swift-no-chunk Don't chunk files during streaming upload
+ --swift-no-large-objects Disable support for static and dynamic large objects
+ --swift-region string Region name - optional (OS_REGION_NAME)
+ --swift-storage-policy string The storage policy to use when creating a new container
+ --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
+ --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ --swift-user string User name to log in (OS_USERNAME)
+ --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+ --union-action-policy string Policy to choose upstream on ACTION category (default "epall")
+ --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
+ --union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
+ --union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
+ --union-upstreams string List of space separated upstreams
+ --uptobox-access-token string Your access token
+ --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+ --uptobox-private Set to make uploaded files private
+ --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
+ --webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-encoding string The encoding for the backend
+ --webdav-headers CommaSepList Set HTTP headers for all transactions
+ --webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --webdav-pass string Password (obscured)
+ --webdav-url string URL of http host to connect to
+ --webdav-user string User name
+ --webdav-vendor string Name of the WebDAV site/service/software you are using
+ --yandex-auth-url string Auth server URL
+ --yandex-client-id string OAuth Client Id
+ --yandex-client-secret string OAuth Client Secret
+ --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-token string OAuth Access Token as a JSON blob
+ --yandex-token-url string Token server url
+ --zoho-auth-url string Auth server URL
+ --zoho-client-id string OAuth Client Id
+ --zoho-client-secret string OAuth Client Secret
+ --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
+ --zoho-region string Zoho region to connect to
+ --zoho-token string OAuth Access Token as a JSON blob
+ --zoho-token-url string Token server url
```
diff --git a/docs/content/googlecloudstorage.md b/docs/content/googlecloudstorage.md
index 092549456..80e5b75b8 100644
--- a/docs/content/googlecloudstorage.md
+++ b/docs/content/googlecloudstorage.md
@@ -320,6 +320,19 @@ Properties:
- Type: string
- Required: false
+#### --gcs-user-project
+
+User project.
+
+Optional - needed only for requester pays.
+
+Properties:
+
+- Config: user_project
+- Env Var: RCLONE_GCS_USER_PROJECT
+- Type: string
+- Required: false
+
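+For example, a minimal sketch of listing a requester-pays bucket, where
+"my-billing-project" is a hypothetical project to bill:
+
+    rclone ls --gcs-user-project my-billing-project gcs:requester-pays-bucket
+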
#### --gcs-service-account-file
Service Account Credentials JSON file path.
@@ -611,6 +624,21 @@ Properties:
- Type: string
- Required: false
+#### --gcs-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option creates an
+empty object ending with "/" to persist the folder.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_GCS_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
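+For example, a sketch with a hypothetical bucket and path:
+
+    rclone mkdir --gcs-directory-markers gcs:my-bucket/photos
+    # uploads an empty object "photos/" so the folder persists
+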
#### --gcs-no-check-bucket
If set, don't attempt to check the bucket exists or create it.
diff --git a/docs/content/hdfs.md b/docs/content/hdfs.md
index 701590e60..6d22e446b 100644
--- a/docs/content/hdfs.md
+++ b/docs/content/hdfs.md
@@ -205,9 +205,9 @@ Properties:
Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
-checks, and wire encryption is required when communicating the the
-datanodes. Possible values are 'authentication', 'integrity' and
-'privacy'. Used only with KERBEROS enabled.
+checks, and wire encryption are required when communicating with
+the datanodes. Possible values are 'authentication', 'integrity'
+and 'privacy'. Used only with KERBEROS enabled.
Properties:
diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md
index 8e65338e7..aff4cd426 100644
--- a/docs/content/onedrive.md
+++ b/docs/content/onedrive.md
@@ -428,6 +428,8 @@ Properties:
#### --onedrive-server-side-across-configs
+Deprecated: use --server-side-across-configs instead.
+
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
@@ -531,7 +533,7 @@ Properties:
Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
+default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -568,6 +570,30 @@ Properties:
- "none"
- None - don't use any hashes
+#### --onedrive-av-override
+
+Allows download of files the server thinks have a virus.
+
+The onedrive/sharepoint server may check uploaded files with an
+antivirus checker. If it detects any potential viruses or malware it
+will block download of the file.
+
+In this case you will see a message like this:
+
+    server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+
+If you are 100% sure you want to download this file anyway, then use
+the --onedrive-av-override flag, or av_override = true in the config
+file.
+
+
+Properties:
+
+- Config: av_override
+- Env Var: RCLONE_ONEDRIVE_AV_OVERRIDE
+- Type: bool
+- Default: false
+
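+For example, a sketch downloading a flagged file (the remote name and
+path are hypothetical):
+
+    rclone copy --onedrive-av-override onedrive:quarantine/report.xlsx /tmp/
+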
#### --onedrive-encoding
The encoding for the backend.
diff --git a/docs/content/oracleobjectstorage.md b/docs/content/oracleobjectstorage.md
index 73cf1920d..0e41ee788 100644
--- a/docs/content/oracleobjectstorage.md
+++ b/docs/content/oracleobjectstorage.md
@@ -611,7 +611,7 @@ Properties:
#### --oos-sse-kms-key-id
-if using using your own master key in vault, this header specifies the
+if using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
diff --git a/docs/content/rc.md b/docs/content/rc.md
index 1cb4dd55b..5f4797800 100644
--- a/docs/content/rc.md
+++ b/docs/content/rc.md
@@ -586,7 +586,7 @@ See the [config dump](/commands/rclone_config_dump/) command for more informatio
**Authentication is required for this call.**
-### config/listremotes: Lists the remotes in the config file. {#config-listremotes}
+### config/listremotes: Lists the remotes in the config file and those defined in environment variables. {#config-listremotes}
Returns
- remotes - array of remote names
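+
+For example, a sketch of invoking it via the rc command:
+
+    rclone rc config/listremotes
+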
@@ -1160,9 +1160,9 @@ See the [cleanup](/commands/rclone_cleanup/) command for more information on the
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
**Authentication is required for this call.**
@@ -1357,9 +1357,9 @@ See the [mkdir](/commands/rclone_mkdir/) command for more information on the abo
This takes the following parameters:
-- srcFs - a remote name string e.g. "drive:" for the source
+- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
-- dstFs - a remote name string e.g. "drive2:" for the destination
+- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
**Authentication is required for this call.**
diff --git a/docs/content/s3.md b/docs/content/s3.md
index 52b2ee0f5..bbaaff602 100644
--- a/docs/content/s3.md
+++ b/docs/content/s3.md
@@ -649,7 +649,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
### Standard options
-Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
#### --s3-provider
@@ -666,18 +666,20 @@ Properties:
- Amazon Web Services (AWS) S3
- "Alibaba"
- Alibaba Cloud Object Storage System (OSS) formerly Aliyun
+ - "ArvanCloud"
+ - Arvan Cloud Object Storage (AOS)
- "Ceph"
- Ceph Object Storage
- "ChinaMobile"
- China Mobile Ecloud Elastic Object Storage (EOS)
- "Cloudflare"
- Cloudflare R2 Storage
- - "ArvanCloud"
- - Arvan Cloud Object Storage (AOS)
- "DigitalOcean"
- DigitalOcean Spaces
- "Dreamhost"
- Dreamhost DreamObjects
+ - "GCS"
+ - Google Cloud Storage
- "HuaweiOBS"
- Huawei Object Storage Service
- "IBMCOS"
@@ -694,6 +696,8 @@ Properties:
- Minio Object Storage
- "Netease"
- Netease Object Storage (NOS)
+ - "Petabox"
+ - Petabox Object Storage
- "RackCorp"
- RackCorp Object Storage
- "Scaleway"
@@ -1033,6 +1037,30 @@ Properties:
#### --s3-region
+Region where your bucket will be created and your data stored.
+
+
+Properties:
+
+- Config: region
+- Env Var: RCLONE_S3_REGION
+- Provider: Petabox
+- Type: string
+- Required: false
+- Examples:
+ - "us-east-1"
+ - US East (N. Virginia)
+ - "eu-central-1"
+ - Europe (Frankfurt)
+ - "ap-southeast-1"
+ - Asia Pacific (Singapore)
+ - "me-south-1"
+ - Middle East (Bahrain)
+ - "sa-east-1"
+ - South America (São Paulo)
+
+#### --s3-region
+
Region to connect to.
Leave blank if you are using an S3 clone and you don't have a region.
@@ -1041,7 +1069,7 @@ Properties:
- Config: region
- Env Var: RCLONE_S3_REGION
-- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
+- Provider: !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
- Type: string
- Required: false
- Examples:
@@ -1152,10 +1180,10 @@ Properties:
- Type: string
- Required: false
- Examples:
- - "s3.ir-thr-at1.arvanstorage.com"
+ - "s3.ir-thr-at1.arvanstorage.ir"
- The default endpoint - a good choice if you are unsure.
- - Tehran Iran (Asiatech)
- - "s3.ir-tbz-sh1.arvanstorage.com"
+ - Tehran Iran (Simin)
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
- Tabriz Iran (Shahriar)
#### --s3-endpoint
@@ -1320,6 +1348,33 @@ Properties:
#### --s3-endpoint
+Endpoint for Petabox S3 Object Storage.
+
+Specify the endpoint from the same region.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: Petabox
+- Type: string
+- Required: true
+- Examples:
+ - "s3.petabox.io"
+ - US East (N. Virginia)
+ - "s3.us-east-1.petabox.io"
+ - US East (N. Virginia)
+ - "s3.eu-central-1.petabox.io"
+ - Europe (Frankfurt)
+ - "s3.ap-southeast-1.petabox.io"
+ - Asia Pacific (Singapore)
+ - "s3.me-south-1.petabox.io"
+ - Middle East (Bahrain)
+ - "s3.sa-east-1.petabox.io"
+ - South America (São Paulo)
+
+#### --s3-endpoint
+
Endpoint for Liara Object Storage API.
Properties:
@@ -1480,6 +1535,21 @@ Properties:
#### --s3-endpoint
+Endpoint for Google Cloud Storage.
+
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Provider: GCS
+- Type: string
+- Required: false
+- Examples:
+ - "https://storage.googleapis.com"
+ - Google Cloud Storage endpoint
+
+#### --s3-endpoint
+
Endpoint for Storj Gateway.
Properties:
@@ -1632,7 +1702,7 @@ Properties:
- Config: endpoint
- Env Var: RCLONE_S3_ENDPOINT
-- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
+- Provider: !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
- Type: string
- Required: false
- Examples:
@@ -1686,8 +1756,10 @@ Properties:
- Wasabi AP Southeast 2 (Sydney)
- "storage.iran.liara.space"
- Liara Iran endpoint
- - "s3.ir-thr-at1.arvanstorage.com"
- - ArvanCloud Tehran Iran (Asiatech) endpoint
+ - "s3.ir-thr-at1.arvanstorage.ir"
+ - ArvanCloud Tehran Iran (Simin) endpoint
+ - "s3.ir-tbz-sh1.arvanstorage.ir"
+ - ArvanCloud Tabriz Iran (Shahriar) endpoint
#### --s3-location-constraint
@@ -1844,7 +1916,7 @@ Properties:
- Required: false
- Examples:
- "ir-thr-at1"
- - Tehran Iran (Asiatech)
+ - Tehran Iran (Simin)
- "ir-tbz-sh1"
- Tabriz Iran (Shahriar)
@@ -2018,7 +2090,7 @@ Properties:
- Config: location_constraint
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
-- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
+- Provider: !AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox
- Type: string
- Required: false
@@ -2263,9 +2335,15 @@ Properties:
- "STANDARD"
- The Standard class for any upload.
- Suitable for on-demand content like streaming or CDN.
+ - Available in all regions.
- "GLACIER"
- Archived storage.
- Prices are lower, but it needs to be restored first to be accessed.
+ - Available in FR-PAR and NL-AMS regions.
+ - "ONEZONE_IA"
+ - One Zone - Infrequent Access.
+ - A good choice for storing secondary backup copies or easily re-creatable data.
+ - Available in the FR-PAR region only.
#### --s3-storage-class
@@ -2290,7 +2368,7 @@ Properties:
### Advanced options
-Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
#### --s3-bucket-acl
@@ -2843,6 +2921,21 @@ Properties:
- Type: string
- Required: false
+#### --s3-directory-markers
+
+Upload an empty object with a trailing slash when a new directory is created.
+
+Empty folders are unsupported for bucket-based remotes; this option creates an
+empty object ending with "/" to persist the folder.
+
+
+Properties:
+
+- Config: directory_markers
+- Env Var: RCLONE_S3_DIRECTORY_MARKERS
+- Type: bool
+- Default: false
+
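+For example, a sketch with a hypothetical bucket and path:
+
+    rclone mkdir --s3-directory-markers s3:my-bucket/photos
+    # uploads an empty object "photos/" so the folder persists
+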
#### --s3-use-multipart-etag
Whether to use ETag in multipart uploads for verification
@@ -2958,6 +3051,30 @@ Properties:
- Type: Tristate
- Default: unset
+#### --s3-use-accept-encoding-gzip
+
+Whether to send `Accept-Encoding: gzip` header.
+
+By default, rclone will append `Accept-Encoding: gzip` to the request to download
+compressed objects whenever possible.
+
+However, some providers such as Google Cloud Storage may alter the HTTP headers, breaking
+the signature of the request.
+
+A symptom of this would be receiving errors like
+
+ SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
+
+In this case, you might want to try disabling this option.
+
+
+Properties:
+
+- Config: use_accept_encoding_gzip
+- Env Var: RCLONE_S3_USE_ACCEPT_ENCODING_GZIP
+- Type: Tristate
+- Default: unset
+
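+For example, a sketch disabling the header for a hypothetical remote
+whose provider rewrites it:
+
+    rclone copy --s3-use-accept-encoding-gzip=false gcs-via-s3:bucket/path /tmp/path
+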
#### --s3-no-system-metadata
Suppress setting and reading of system metadata
@@ -3029,7 +3146,7 @@ to normal storage.
Usage Examples:
- rclone backend restore s3:bucket/path/to/ --include /object [-o priority=PRIORITY] [-o lifetime=DAYS]
+ rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
diff --git a/docs/content/sftp.md b/docs/content/sftp.md
index bff2796be..643d46765 100644
--- a/docs/content/sftp.md
+++ b/docs/content/sftp.md
@@ -850,7 +850,7 @@ Pass multiple variables space separated, eg
VAR1=value VAR2=value
-and pass variables with spaces in in quotes, eg
+and pass variables with spaces in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
@@ -921,6 +921,26 @@ Properties:
- Type: SpaceSepList
- Default:
+#### --sftp-host-key-algorithms
+
+Space separated list of host key algorithms, ordered by preference.
+
+At least one must match the server configuration. This can be checked, for example, using ssh -Q HostKeyAlgorithms.
+
+Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
+
+Example:
+
+ ssh-ed25519 ssh-rsa ssh-dss
+
+
+Properties:
+
+- Config: host_key_algorithms
+- Env Var: RCLONE_SFTP_HOST_KEY_ALGORITHMS
+- Type: SpaceSepList
+- Default:
+
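+A sketch of a config snippet restricting a hypothetical remote to
+ed25519 host keys:
+
+    [mysftp]
+    type = sftp
+    host = example.com
+    host_key_algorithms = ssh-ed25519
+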
{{< rem autogenerated options stop >}}
## Limitations
diff --git a/docs/content/swift.md b/docs/content/swift.md
index 7690a530f..4b4817f4a 100644
--- a/docs/content/swift.md
+++ b/docs/content/swift.md
@@ -315,6 +315,7 @@ Properties:
- OVH
- "https://authenticate.ain.net"
- Blomp Cloud Storage
+
#### --swift-user-id
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
@@ -490,7 +491,7 @@ Properties:
### Advanced options
-Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage , Memset Memstore, OVH)).
+Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
#### --swift-leave-parts-on-error
diff --git a/docs/content/uptobox.md b/docs/content/uptobox.md
index 7f6e750ab..ed717d6c2 100644
--- a/docs/content/uptobox.md
+++ b/docs/content/uptobox.md
@@ -122,6 +122,17 @@ Properties:
Here are the Advanced options specific to uptobox (Uptobox).
+#### --uptobox-private
+
+Set to make uploaded files private.
+
+Properties:
+
+- Config: private
+- Env Var: RCLONE_UPTOBOX_PRIVATE
+- Type: bool
+- Default: false
+
#### --uptobox-encoding
The encoding for the backend.
diff --git a/docs/content/webdav.md b/docs/content/webdav.md
index ee26f8269..30042e0e2 100644
--- a/docs/content/webdav.md
+++ b/docs/content/webdav.md
@@ -139,6 +139,8 @@ Properties:
- Type: string
- Required: false
- Examples:
+ - "fastmail"
+ - Fastmail Files
- "nextcloud"
- Nextcloud
- "owncloud"
@@ -238,6 +240,34 @@ Properties:
- Type: CommaSepList
- Default:
+#### --webdav-pacer-min-sleep
+
+Minimum time to sleep between API calls.
+
+Properties:
+
+- Config: pacer_min_sleep
+- Env Var: RCLONE_WEBDAV_PACER_MIN_SLEEP
+- Type: Duration
+- Default: 10ms
+
+#### --webdav-nextcloud-chunk-size
+
+Nextcloud upload chunk size.
+
+We recommend configuring your Nextcloud instance to increase the max chunk size to 1 GB for better upload performance.
+See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
+
+Set to 0 to disable chunked uploading.
+
+
+Properties:
+
+- Config: nextcloud_chunk_size
+- Env Var: RCLONE_WEBDAV_NEXTCLOUD_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
+
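+For example, a sketch uploading with 1 GiB chunks (the remote name is
+hypothetical):
+
+    rclone copy --webdav-nextcloud-chunk-size 1Gi backup.tar nextcloud:backups
+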
{{< rem autogenerated options stop >}}
## Provider notes
diff --git a/go.sum b/go.sum
index c7a505817..e0765c719 100644
--- a/go.sum
+++ b/go.sum
@@ -39,24 +39,16 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -131,7 +123,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 h1:xJBhC00smQpSZw3Kr0ErMUBXhUSjYoLRm2szxdbRBL0=
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00/go.mod h1:nNICngOdmNImBb/vuL+dSc0aIg3ryNATpjxThNoPw4g=
diff --git a/rclone.1 b/rclone.1
index ca3b34396..92ff6ca63 100644
--- a/rclone.1
+++ b/rclone.1
@@ -1,7 +1,7 @@
.\"t
.\" Automatically generated by Pandoc 2.9.2.1
.\"
-.TH "rclone" "1" "Mar 14, 2023" "User Manual" ""
+.TH "rclone" "1" "Jun 30, 2023" "User Manual" ""
.hy
.SH Rclone syncs your files to cloud storage
.PP
@@ -175,6 +175,8 @@ Dropbox
.IP \[bu] 2
Enterprise File Fabric
.IP \[bu] 2
+Fastmail Files
+.IP \[bu] 2
FTP
.IP \[bu] 2
Google Cloud Storage
@@ -223,6 +225,8 @@ Nextcloud
.IP \[bu] 2
OVH
.IP \[bu] 2
+Blomp Cloud Storage
+.IP \[bu] 2
OpenDrive
.IP \[bu] 2
OpenStack Swift
@@ -235,6 +239,10 @@ ownCloud
.IP \[bu] 2
pCloud
.IP \[bu] 2
+Petabox
+.IP \[bu] 2
+PikPak
+.IP \[bu] 2
premiumize.me
.IP \[bu] 2
put.io
@@ -524,12 +532,22 @@ comes pre-installed with the latest versions of Windows.
If not, update the App
Installer (https://www.microsoft.com/p/app-installer/9nblggh4nns1)
package from the Microsoft store.
+.PP
+To install rclone
.IP
.nf
\f[C]
winget install Rclone.Rclone
\f[R]
.fi
+.PP
+To uninstall rclone
+.IP
+.nf
+\f[C]
+winget uninstall Rclone.Rclone --force
+\f[R]
+.fi
.SS Chocolatey package manager
.PP
Make sure you have Choco (https://chocolatey.org/) installed
@@ -667,10 +685,16 @@ Here are some commands tested on an Ubuntu 18.04.3 host:
# config on host at \[ti]/.config/rclone/rclone.conf
# data on host at \[ti]/data
+# add a remote interactively
+docker run --rm -it \[rs]
+ --volume \[ti]/.config/rclone:/config/rclone \[rs]
+ --user $(id -u):$(id -g) \[rs]
+ rclone/rclone \[rs]
+ config
+
# make sure the config is ok by listing the remotes
docker run --rm \[rs]
--volume \[ti]/.config/rclone:/config/rclone \[rs]
- --volume \[ti]/data:/data:shared \[rs]
--user $(id -u):$(id -g) \[rs]
rclone/rclone \[rs]
listremotes
@@ -1118,7 +1142,7 @@ Microsoft Azure Blob Storage (https://rclone.org/azureblob/)
.IP \[bu] 2
Microsoft OneDrive (https://rclone.org/onedrive/)
.IP \[bu] 2
-OpenStack Swift / Rackspace Cloudfiles / Memset
+OpenStack Swift / Rackspace Cloudfiles / Blomp Cloud Storage / Memset
Memstore (https://rclone.org/swift/)
.IP \[bu] 2
OpenDrive (https://rclone.org/opendrive/)
@@ -1127,6 +1151,8 @@ Oracle Object Storage (https://rclone.org/oracleobjectstorage/)
.IP \[bu] 2
Pcloud (https://rclone.org/pcloud/)
.IP \[bu] 2
+PikPak (https://rclone.org/pikpak/)
+.IP \[bu] 2
premiumize.me (https://rclone.org/premiumizeme/)
.IP \[bu] 2
put.io (https://rclone.org/putio/)
@@ -1666,7 +1692,7 @@ It doesn\[aq]t alter the source or destination.
.PP
For the crypt (https://rclone.org/crypt/) remote there is a dedicated
command, cryptcheck (https://rclone.org/commands/rclone_cryptcheck/),
-that are able to check the checksums of the crypted files.
+that are able to check the checksums of the encrypted files.
.PP
If you supply the \f[C]--size-only\f[R] flag, it will only compare the
sizes not the hashes as well.
@@ -1714,6 +1740,10 @@ different.
.IP \[bu] 2
\f[C]! path\f[R] means there was an error reading or hashing the source
or dest.
+.PP
+The default number of parallel checks is 8.
+See the --checkers=N (https://rclone.org/docs/#checkers-n) option for
+more information.
.IP
.nf
\f[C]
@@ -2078,7 +2108,7 @@ Recurses by default, use \f[C]--max-depth 1\f[R] to stop the recursion.
.PP
Some backends do not always provide file sizes, see for example Google
Photos (https://rclone.org/googlephotos/#size) and Google
-Drive (https://rclone.org/drive/#limitations-of-google-docs).
+Docs (https://rclone.org/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such files
were encountered, and count them in as empty files in the output of the
size command.
@@ -2666,6 +2696,31 @@ Use the \f[C]--head\f[R] flag to print characters only at the start,
\f[C]--count\f[R] to print a section in the middle.
Note that if offset is negative it will count from the end, so
\f[C]--offset -1 --count 1\f[R] is equivalent to \f[C]--tail 1\f[R].
+.PP
+Use the \f[C]--separator\f[R] flag to print a separator value between
+files.
+Be sure to shell-escape special characters.
+For example, to print a newline between files, use:
+.IP \[bu] 2
+bash:
+.RS 2
+.IP
+.nf
+\f[C]
+rclone --include \[dq]*.txt\[dq] --separator $\[aq]\[rs]n\[aq] cat remote:path/to/dir
+\f[R]
+.fi
+.RE
+.IP \[bu] 2
+powershell:
+.RS 2
+.IP
+.nf
+\f[C]
+rclone --include \[dq]*.txt\[dq] --separator \[dq]\[ga]n\[dq] cat remote:path/to/dir
+\f[R]
+.fi
+.RE
.IP
.nf
\f[C]
@@ -2676,12 +2731,13 @@ rclone cat remote:path [flags]
.IP
.nf
\f[C]
- --count int Only print N characters (default -1)
- --discard Discard the output instead of printing
- --head int Only print the first N characters
- -h, --help help for cat
- --offset int Start printing at offset N (or from end if -ve)
- --tail int Only print the last N characters
+ --count int Only print N characters (default -1)
+ --discard Discard the output instead of printing
+ --head int Only print the first N characters
+ -h, --help help for cat
+ --offset int Start printing at offset N (or from end if -ve)
+ --separator string Separator to use between objects when printing multiple files
+ --tail int Only print the last N characters
\f[R]
.fi
.PP
@@ -2741,6 +2797,10 @@ different.
.IP \[bu] 2
\f[C]! path\f[R] means there was an error reading or hashing the source
or dest.
+.PP
+The default number of parallel checks is 8.
+See the --checkers=N (https://rclone.org/docs/#checkers-n) option for
+more information.
.IP
.nf
\f[C]
@@ -2771,12 +2831,11 @@ rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
.SH rclone completion
.PP
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
.SS Synopsis
.PP
-Generate the autocompletion script for rclone for the specified shell.
-See each sub-command\[aq]s help for details on how to use the generated
-script.
+Generates a shell completion script for rclone.
+Run with \f[C]--help\f[R] to list the supported shells.
.SS Options
.IP
.nf
@@ -2793,68 +2852,56 @@ rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
.IP \[bu] 2
rclone completion
-bash (https://rclone.org/commands/rclone_completion_bash/) - Generate
-the autocompletion script for bash
+bash (https://rclone.org/commands/rclone_completion_bash/) - Output bash
+completion script for rclone.
.IP \[bu] 2
rclone completion
-fish (https://rclone.org/commands/rclone_completion_fish/) - Generate
-the autocompletion script for fish
+fish (https://rclone.org/commands/rclone_completion_fish/) - Output fish
+completion script for rclone.
.IP \[bu] 2
rclone completion
-powershell (https://rclone.org/commands/rclone_completion_powershell/) -
-Generate the autocompletion script for powershell
-.IP \[bu] 2
-rclone completion
-zsh (https://rclone.org/commands/rclone_completion_zsh/) - Generate the
-autocompletion script for zsh
+zsh (https://rclone.org/commands/rclone_completion_zsh/) - Output zsh
+completion script for rclone.
.SH rclone completion bash
.PP
-Generate the autocompletion script for bash
+Output bash completion script for rclone.
.SS Synopsis
.PP
-Generate the autocompletion script for the bash shell.
+Generates a bash shell autocompletion script for rclone.
.PP
-This script depends on the \[aq]bash-completion\[aq] package.
-If it is not installed already, you can install it via your OS\[aq]s
-package manager.
-.PP
-To load completions in your current shell session:
+This writes to /etc/bash_completion.d/rclone by default, so it will
+probably need to be run with sudo or as root, e.g.
.IP
.nf
\f[C]
-source <(rclone completion bash)
+sudo rclone genautocomplete bash
\f[R]
.fi
.PP
-To load completions for every new session, execute once:
-.SS Linux:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
.IP
.nf
\f[C]
-rclone completion bash > /etc/bash_completion.d/rclone
-\f[R]
-.fi
-.SS macOS:
-.IP
-.nf
-\f[C]
-rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
+\&. /etc/bash_completion
\f[R]
.fi
.PP
-You will need to start a new shell for this setup to take effect.
+If you supply a command line argument, the script will be written there.
+.PP
+If output_file is \[dq]-\[dq], then the output will be written to
+stdout.
.IP
.nf
\f[C]
-rclone completion bash
+rclone completion bash [output_file] [flags]
\f[R]
.fi
.SS Options
.IP
.nf
\f[C]
- -h, --help help for bash
- --no-descriptions disable completion descriptions
+ -h, --help help for bash
\f[R]
.fi
.PP
@@ -2863,43 +2910,47 @@ not listed here.
.SS SEE ALSO
.IP \[bu] 2
rclone completion (https://rclone.org/commands/rclone_completion/) -
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
.SH rclone completion fish
.PP
-Generate the autocompletion script for fish
+Output fish completion script for rclone.
.SS Synopsis
.PP
-Generate the autocompletion script for the fish shell.
+Generates a fish autocompletion script for rclone.
.PP
-To load completions in your current shell session:
+This writes to /etc/fish/completions/rclone.fish by default, so it will
+probably need to be run with sudo or as root, e.g.
.IP
.nf
\f[C]
-rclone completion fish | source
+sudo rclone genautocomplete fish
\f[R]
.fi
.PP
-To load completions for every new session, execute once:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
.IP
.nf
\f[C]
-rclone completion fish > \[ti]/.config/fish/completions/rclone.fish
+\&. /etc/fish/completions/rclone.fish
\f[R]
.fi
.PP
-You will need to start a new shell for this setup to take effect.
+If you supply a command line argument, the script will be written there.
+.PP
+If output_file is \[dq]-\[dq], then the output will be written to
+stdout.
.IP
.nf
\f[C]
-rclone completion fish [flags]
+rclone completion fish [output_file] [flags]
\f[R]
.fi
.SS Options
.IP
.nf
\f[C]
- -h, --help help for fish
- --no-descriptions disable completion descriptions
+ -h, --help help for fish
\f[R]
.fi
.PP
@@ -2908,11 +2959,11 @@ not listed here.
.SS SEE ALSO
.IP \[bu] 2
rclone completion (https://rclone.org/commands/rclone_completion/) -
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
.SH rclone completion powershell
.PP
Generate the autocompletion script for powershell
-.SS Synopsis
+.SH Synopsis
.PP
Generate the autocompletion script for powershell.
.PP
@@ -2932,7 +2983,7 @@ command to your powershell profile.
rclone completion powershell [flags]
\f[R]
.fi
-.SS Options
+.SH Options
.IP
.nf
\f[C]
@@ -2943,64 +2994,50 @@ rclone completion powershell [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SS SEE ALSO
+.SH SEE ALSO
.IP \[bu] 2
rclone completion (https://rclone.org/commands/rclone_completion/) -
Generate the autocompletion script for the specified shell
.SH rclone completion zsh
.PP
-Generate the autocompletion script for zsh
+Output zsh completion script for rclone.
.SS Synopsis
.PP
-Generate the autocompletion script for the zsh shell.
+Generates a zsh autocompletion script for rclone.
.PP
-If shell completion is not already enabled in your environment you will
-need to enable it.
-You can execute the following once:
+This writes to /usr/share/zsh/vendor-completions/_rclone by default, so
+it will probably need to be run with sudo or as root, e.g.
.IP
.nf
\f[C]
-echo \[dq]autoload -U compinit; compinit\[dq] >> \[ti]/.zshrc
+sudo rclone genautocomplete zsh
\f[R]
.fi
.PP
-To load completions in your current shell session:
+Log out and log in again to use the autocompletion scripts, or source
+them directly
.IP
.nf
\f[C]
-source <(rclone completion zsh); compdef _rclone rclone
+autoload -U compinit && compinit
\f[R]
.fi
.PP
-To load completions for every new session, execute once:
-.SS Linux:
-.IP
-.nf
-\f[C]
-rclone completion zsh > \[dq]${fpath[1]}/_rclone\[dq]
-\f[R]
-.fi
-.SS macOS:
-.IP
-.nf
-\f[C]
-rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
-\f[R]
-.fi
+If you supply a command line argument, the script will be written there.
.PP
-You will need to start a new shell for this setup to take effect.
+If output_file is \[dq]-\[dq], then the output will be written to
+stdout.
.IP
.nf
\f[C]
-rclone completion zsh [flags]
+rclone completion zsh [output_file] [flags]
\f[R]
.fi
.SS Options
.IP
.nf
\f[C]
- -h, --help help for zsh
- --no-descriptions disable completion descriptions
+ -h, --help help for zsh
\f[R]
.fi
.PP
@@ -3009,7 +3046,7 @@ not listed here.
.SS SEE ALSO
.IP \[bu] 2
rclone completion (https://rclone.org/commands/rclone_completion/) -
-Generate the autocompletion script for the specified shell
+Output completion script for a given shell.
.SH rclone config create
.PP
Create a new remote with name, type and options.
@@ -3769,14 +3806,14 @@ rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
.SH rclone cryptcheck
.PP
-Cryptcheck checks the integrity of a crypted remote.
+Cryptcheck checks the integrity of an encrypted remote.
.SS Synopsis
.PP
rclone cryptcheck checks a remote against a
crypted (https://rclone.org/crypt/) remote.
This is the equivalent of running rclone
check (https://rclone.org/commands/rclone_check/), but able to check the
-checksums of the crypted remote.
+checksums of the encrypted remote.
.PP
For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
@@ -3838,6 +3875,10 @@ different.
.IP \[bu] 2
\f[C]! path\f[R] means there was an error reading or hashing the source
or dest.
+.PP
+The default number of parallel checks is 8.
+See the --checkers=N (https://rclone.org/docs/#checkers-n) option for
+more information.
.IP
.nf
\f[C]
@@ -3944,11 +3985,11 @@ commands, flags and backends.
.SH rclone genautocomplete
.PP
Output completion script for a given shell.
-.SS Synopsis
+.SH Synopsis
.PP
Generates a shell completion script for rclone.
Run with \f[C]--help\f[R] to list the supported shells.
-.SS Options
+.SH Options
.IP
.nf
\f[C]
@@ -3958,7 +3999,7 @@ Run with \f[C]--help\f[R] to list the supported shells.
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SS SEE ALSO
+.SH SEE ALSO
.IP \[bu] 2
rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
@@ -3977,7 +4018,7 @@ zsh completion script for rclone.
.SH rclone genautocomplete bash
.PP
Output bash completion script for rclone.
-.SS Synopsis
+.SH Synopsis
.PP
Generates a bash shell autocompletion script for rclone.
.PP
@@ -4009,7 +4050,7 @@ stdout.
rclone genautocomplete bash [output_file] [flags]
\f[R]
.fi
-.SS Options
+.SH Options
.IP
.nf
\f[C]
@@ -4019,7 +4060,7 @@ rclone genautocomplete bash [output_file] [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SS SEE ALSO
+.SH SEE ALSO
.IP \[bu] 2
rclone
genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) -
@@ -4027,7 +4068,7 @@ Output completion script for a given shell.
.SH rclone genautocomplete fish
.PP
Output fish completion script for rclone.
-.SS Synopsis
+.SH Synopsis
.PP
Generates a fish autocompletion script for rclone.
.PP
@@ -4059,7 +4100,7 @@ stdout.
rclone genautocomplete fish [output_file] [flags]
\f[R]
.fi
-.SS Options
+.SH Options
.IP
.nf
\f[C]
@@ -4069,7 +4110,7 @@ rclone genautocomplete fish [output_file] [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SS SEE ALSO
+.SH SEE ALSO
.IP \[bu] 2
rclone
genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) -
@@ -4077,7 +4118,7 @@ Output completion script for a given shell.
.SH rclone genautocomplete zsh
.PP
Output zsh completion script for rclone.
-.SS Synopsis
+.SH Synopsis
.PP
Generates a zsh autocompletion script for rclone.
.PP
@@ -4109,7 +4150,7 @@ stdout.
rclone genautocomplete zsh [output_file] [flags]
\f[R]
.fi
-.SS Options
+.SH Options
.IP
.nf
\f[C]
@@ -4119,7 +4160,7 @@ rclone genautocomplete zsh [output_file] [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SS SEE ALSO
+.SH SEE ALSO
.IP \[bu] 2
rclone
genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) -
@@ -4283,7 +4324,8 @@ rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
.SH rclone listremotes
.PP
-List all the remotes in the config file.
+List all the remotes in the config file and those defined in
+environment variables.
.SS Synopsis
.PP
rclone listremotes lists all the available remotes from the config file.
@@ -4998,6 +5040,23 @@ macFUSE is a traditional FUSE driver utilizing a macOS kernel extension
(kext).
FUSE-T is an alternative FUSE system which \[dq]mounts\[dq] via an NFSv4
local server.
+.SS macFUSE Notes
+.PP
+If macFUSE is installed using the dmg
+packages (https://github.com/osxfuse/osxfuse/releases) from the website,
+rclone will locate the macFUSE libraries without any further
+intervention.
+If, however, macFUSE is installed using the
+MacPorts (https://www.macports.org/) package manager, the following
+additional steps are required.
+.IP
+.nf
+\f[C]
+sudo mkdir /usr/local/lib
+cd /usr/local/lib
+sudo ln -s /opt/local/lib/libfuse.2.dylib
+\f[R]
+.fi
.SS FUSE-T Limitations, Caveats, and Notes
.PP
There are some limitations, caveats, and notes about how it works.
@@ -5159,12 +5218,12 @@ or create systemd mount units:
\f[C]
# /etc/systemd/system/mnt-data.mount
[Unit]
-After=network-online.target
+Description=Mount for /mnt/data
[Mount]
Type=rclone
What=sftp1:subdir
Where=/mnt/data
-Options=rw,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
\f[R]
.fi
.PP
@@ -5174,8 +5233,7 @@ optionally accompanied by systemd automount unit
\f[C]
# /etc/systemd/system/mnt-data.automount
[Unit]
-After=network-online.target
-Before=remote-fs.target
+Description=AutoMount for /mnt/data
[Automount]
Where=/mnt/data
TimeoutIdleSec=600
@@ -5324,7 +5382,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -5352,6 +5410,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt to
+evict the least recently accessed files from the cache first, starting
+with the files that have gone unaccessed for the longest time.
+This cache flushing strategy is efficient, and the most relevant files
+are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache once
+the set time since their last access has passed.
+With the default value of 1 hour, files that have not been accessed for
+1 hour start being evicted.
+Each time a cached file is accessed, its timer is reset to 0, and a
+further hour must pass before it becomes eligible for eviction.
+Specify the time with standard notation: s, m, h, d, w.
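+.PP
+For example, a sketch capping the cache at 10 GiB and evicting entries
+not accessed for 12 hours:
+.IP
+.nf
+\f[C]
+rclone mount remote: /mnt/data --vfs-cache-mode full \[rs]
+    --vfs-cache-max-size 10G --vfs-cache-max-age 12h
+\f[R]
+.fi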
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -5647,6 +5719,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don\[aq]t compare checksums on up/download
--no-modtime Don\[aq]t read/write the modification time (can speed things up)
@@ -5658,7 +5731,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -6041,10 +6114,12 @@ support streaming.
If the size of the stream is different in length to the \f[C]--size\f[R]
passed in then the transfer will likely fail.
.PP
-Note that the upload can also not be retried because the data is not
-kept around until the upload succeeds.
-If you need to transfer a lot of data, you\[aq]re better off caching
-locally and then \f[C]rclone move\f[R] it to the destination.
+Note that the upload cannot be retried because the data is not stored.
+If the backend supports multipart uploading then individual chunks can
+be retried.
+If you need to transfer a lot of data, you may be better off caching it
+locally and then using \f[C]rclone move\f[R] to send it to the
+destination, which can use retries.
.IP
.nf
\f[C]
@@ -6083,61 +6158,63 @@ See the rc documentation (https://rclone.org/rc/) for more info on the
rc flags.
.SS Server options
.PP
-Use \f[C]--addr\f[R] to specify which IP address and port the server
-should listen on, eg \f[C]--addr 1.2.3.4:8000\f[R] or
-\f[C]--addr :8080\f[R] to listen to all IPs.
+Use \f[C]--rc-addr\f[R] to specify which IP address and port the server
+should listen on, eg \f[C]--rc-addr 1.2.3.4:8000\f[R] or
+\f[C]--rc-addr :8080\f[R] to listen to all IPs.
By default it only listens on localhost.
You can use port :0 to let the OS choose an available port.
.PP
-If you set \f[C]--addr\f[R] to listen on a public or LAN accessible IP
-address then using Authentication is advised - see the next section for
-info.
+If you set \f[C]--rc-addr\f[R] to listen on a public or LAN accessible
+IP address then using Authentication is advised - see the next section
+for info.
.PP
You can use a unix socket by setting the url to
\f[C]unix:///path/to/socket\f[R] or just by using an absolute path name.
Note that unix sockets bypass the authentication - this is expected to
be done with file system permissions.
.PP
-\f[C]--addr\f[R] may be repeated to listen on multiple
+\f[C]--rc-addr\f[R] may be repeated to listen on multiple
IPs/ports/sockets.
.PP
-\f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can
-be used to control the timeouts on the server.
+\f[C]--rc-server-read-timeout\f[R] and
+\f[C]--rc-server-write-timeout\f[R] can be used to control the timeouts
+on the server.
Note that this is the total time for a transfer.
.PP
-\f[C]--max-header-bytes\f[R] controls the maximum number of bytes the
+\f[C]--rc-max-header-bytes\f[R] controls the maximum number of bytes the
server will accept in the HTTP header.
.PP
-\f[C]--baseurl\f[R] controls the URL prefix that rclone serves from.
+\f[C]--rc-baseurl\f[R] controls the URL prefix that rclone serves from.
By default rclone will serve from the root.
-If you used \f[C]--baseurl \[dq]/rclone\[dq]\f[R] then rclone would
+If you used \f[C]--rc-baseurl \[dq]/rclone\[dq]\f[R] then rclone would
serve from a URL starting with \[dq]/rclone/\[dq].
This is useful if you wish to proxy rclone serve.
Rclone automatically inserts leading and trailing \[dq]/\[dq] on
-\f[C]--baseurl\f[R], so \f[C]--baseurl \[dq]rclone\[dq]\f[R],
-\f[C]--baseurl \[dq]/rclone\[dq]\f[R] and
-\f[C]--baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically.
+\f[C]--rc-baseurl\f[R], so \f[C]--rc-baseurl \[dq]rclone\[dq]\f[R],
+\f[C]--rc-baseurl \[dq]/rclone\[dq]\f[R] and
+\f[C]--rc-baseurl \[dq]/rclone/\[dq]\f[R] are all treated identically.
.SS TLS (SSL)
.PP
By default this will serve over http.
If you want you can serve over https.
-You will need to supply the \f[C]--cert\f[R] and \f[C]--key\f[R] flags.
+You will need to supply the \f[C]--rc-cert\f[R] and \f[C]--rc-key\f[R]
+flags.
If you wish to do client side certificate validation then you will need
-to supply \f[C]--client-ca\f[R] also.
+to supply \f[C]--rc-client-ca\f[R] also.
.PP
-\f[C]--cert\f[R] should be a either a PEM encoded certificate or a
+\f[C]--rc-cert\f[R] should be either a PEM encoded certificate or a
concatenation of that with the CA certificate.
-\f[C]--key\f[R] should be the PEM encoded private key and
-\f[C]--client-ca\f[R] should be the PEM encoded client certificate
+\f[C]--rc-key\f[R] should be the PEM encoded private key and
+\f[C]--rc-client-ca\f[R] should be the PEM encoded client certificate
authority certificate.
.PP
---min-tls-version is minimum TLS version that is acceptable.
+--rc-min-tls-version is the minimum TLS version that is acceptable.
Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq]
and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]).
.SS Template
.PP
-\f[C]--template\f[R] allows a user to specify a custom markup template
-for HTTP and WebDAV serve functions.
+\f[C]--rc-template\f[R] allows a user to specify a custom markup
+template for HTTP and WebDAV serve functions.
The server exports the following markup to be used within the template
to server pages:
.PP
@@ -6237,10 +6314,16 @@ T}
By default this will serve files without needing a login.
.PP
You can either use an htpasswd file which can take lots of users, or set
-a single username and password with the \f[C]--user\f[R] and
-\f[C]--pass\f[R] flags.
+a single username and password with the \f[C]--rc-user\f[R] and
+\f[C]--rc-pass\f[R] flags.
.PP
-Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file.
+If no static users are configured by either of the above methods, and
+client certificates are required by the \f[C]--rc-client-ca\f[R] flag
+passed to the server, the client certificate common name will be
+considered as the username.
+.PP
+Use \f[C]--rc-htpasswd /path/to/htpasswd\f[R] to provide an htpasswd
+file.
This is in standard apache format and supports MD5, SHA1 and BCrypt for
basic authentication.
Bcrypt is recommended.
@@ -6257,9 +6340,9 @@ htpasswd -B htpasswd anotherUser
.PP
The password file can be updated while rclone is running.
.PP
-Use \f[C]--realm\f[R] to set the authentication realm.
+Use \f[C]--rc-realm\f[R] to set the authentication realm.
.PP
-Use \f[C]--salt\f[R] to change the password hashing salt from the
+Use \f[C]--rc-salt\f[R] to change the password hashing salt from the
default.
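+.PP
+A sketch combining these flags (certificate paths and credentials are
+hypothetical):
+.IP
+.nf
+\f[C]
+rclone rcd --rc-addr :8080 --rc-user admin --rc-pass secret \[rs]
+    --rc-cert /etc/rclone/cert.pem --rc-key /etc/rclone/key.pem
+\f[R]
+.fi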
.IP
.nf
@@ -6600,7 +6683,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -6628,6 +6711,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
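+.PP
+For example, to keep files cached for up to 12 hours after they were
+last accessed, you could combine flags like this:
+.IP
+.nf
+\f[C]
+--vfs-cache-mode full --vfs-cache-max-age 12h
+\f[R]
+.fi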
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -6922,7 +7019,7 @@ rclone serve dlna remote:path [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7096,7 +7193,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -7124,6 +7221,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -7421,6 +7532,7 @@ rclone serve docker [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
--network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don\[aq]t compare checksums on up/download
--no-modtime Don\[aq]t read/write the modification time (can speed things up)
@@ -7435,7 +7547,7 @@ rclone serve docker [flags]
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -7581,7 +7693,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -7609,6 +7721,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -7999,7 +8125,7 @@ rclone serve ftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default \[dq]anonymous\[dq])
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -8198,6 +8324,11 @@ You can either use an htpasswd file which can take lots of users, or set
a single username and password with the \f[C]--user\f[R] and
\f[C]--pass\f[R] flags.
.PP
+If no static users are configured by either of the above methods, and
+client certificates are required by the \f[C]--client-ca\f[R] flag
+passed to the server, the client certificate common name will be
+considered as the username.
+.PP
Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file.
This is in standard apache format and supports MD5, SHA1 and BCrypt for
basic authentication.
@@ -8316,7 +8447,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -8344,6 +8475,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -8742,7 +8887,7 @@ rclone serve http remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -8925,6 +9070,11 @@ You can either use an htpasswd file which can take lots of users, or set
a single username and password with the \f[C]--user\f[R] and
\f[C]--pass\f[R] flags.
.PP
+If no static users are configured by either of the above methods, and
+client certificates are required by the \f[C]--client-ca\f[R] flag
+passed to the server, the client certificate common name will be
+considered as the username.
+.PP
Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file.
This is in standard apache format and supports MD5, SHA1 and BCrypt for
basic authentication.
@@ -9046,7 +9196,7 @@ server to send commands to while the servers all have different views of
the state of the filing system.
.PP
The \[dq]restrict\[dq] in authorized_keys prevents SHA1SUMs and MD5SUMs
-from beeing used.
+from being used.
Omitting \[dq]restrict\[dq] and using \f[C]--sftp-path-override\f[R] to
enable checksumming is possible but less secure and you could use the
SFTP server provided by OpenSSH in this case.
@@ -9147,7 +9297,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -9175,6 +9325,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -9565,7 +9729,7 @@ rclone serve sftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -9797,6 +9961,11 @@ You can either use an htpasswd file which can take lots of users, or set
a single username and password with the \f[C]--user\f[R] and
\f[C]--pass\f[R] flags.
.PP
+If no static users are configured by either of the above methods, and
+client certificates are required by the \f[C]--client-ca\f[R] flag
+passed to the server, the client certificate common name will be
+considered as the username.
+.PP
Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file.
This is in standard apache format and supports MD5, SHA1 and BCrypt for
basic authentication.
@@ -9915,7 +10084,7 @@ find that you need one or the other or both.
\f[C]
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max age of objects in the cache (default 1h0m0s)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
@@ -9943,6 +10112,20 @@ this size for two reasons.
Firstly because it is only checked every
\f[C]--vfs-cache-poll-interval\f[R].
Secondly because open files cannot be evicted from the cache.
+When \f[C]--vfs-cache-max-size\f[R] is exceeded, rclone will attempt
+to evict the least accessed files from the cache first, starting with
+the files that haven\[aq]t been accessed for the longest time.
+This cache flushing strategy is efficient, and the more relevant
+files are likely to remain cached.
+.PP
+The \f[C]--vfs-cache-max-age\f[R] flag evicts files from the cache
+after the set time since last access has passed.
+With the default value of 1 hour, files that haven\[aq]t been
+accessed for 1 hour are evicted from the cache.
+When a cached file is accessed, the timer is reset to 0 and eviction
+waits for another full hour.
+Specify the time with standard notation: s, m, h, d, w.
.PP
You \f[B]should not\f[R] run two copies of rclone using the same VFS
cache with the same or overlapping remotes if using
@@ -10343,7 +10526,7 @@ rclone serve webdav remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
- --vfs-cache-max-age Duration Max age of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
@@ -10434,7 +10617,7 @@ Run a test command
.PP
Rclone test is used to run test commands.
.PP
-Select which test comand you want with the subcommand, eg
+Select which test command you want with the subcommand, eg
.IP
.nf
\f[C]
@@ -11778,7 +11961,7 @@ When using this flag, rclone won\[aq]t update mtimes of remote files if
they are incorrect as it would normally.
.SS --color WHEN
.PP
-Specifiy when colors (and other ANSI codes) should be added to the
+Specify when colors (and other ANSI codes) should be added to the
output.
.PP
\f[C]AUTO\f[R] (default) only allows ANSI codes when the output is a
@@ -11916,6 +12099,24 @@ You may also choose to encrypt the file.
.PP
When token-based authentication are used, the configuration file must be
writable, because rclone needs to update the tokens inside it.
+.PP
+To reduce the risk of corrupting an existing configuration file,
+rclone will not write directly to it when saving changes.
+Instead it will first write to a new, temporary file.
+If a configuration file already exists, rclone will (on Unix systems)
+try to mirror its permissions to the new file.
+Then it will rename the existing file to a temporary name as a
+backup.
+Next, rclone will rename the new file to the correct name, before
+finally cleaning up by deleting the backup file.
+.PP
+If the configuration file path used by rclone is a symbolic link, then
+this will be evaluated and rclone will write to the resolved path,
+instead of overwriting the symbolic link.
+Temporary files used in the process (described above) will be written
+to the same parent directory as the resolved configuration file.
+If this directory is itself a symbolic link, it will not be resolved,
+and the temporary files will be written to the location of the
+directory symbolic link.
.SS --contimeout=TIME
.PP
Set the connection timeout.
@@ -11949,6 +12150,18 @@ The default is \f[C]interactive\f[R].
.PD
See the dedupe command for more information as to what these options
mean.
+.SS --default-time TIME
+.PP
+If a file or directory does not have a modification time rclone can
+read then rclone will display this fixed time instead.
+.PP
+The default is \f[C]2000-01-01 00:00:00 UTC\f[R].
+This can be configured in any of the ways shown in the time or duration
+options.
+.PP
+For example, \f[C]--default-time 2020-06-01\f[R] sets the default
+time to the 1st of June 2020, and \f[C]--default-time 0s\f[R] sets it
+to the time rclone started up.
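+.PP
+For example, to list a remote showing unknown modification times as
+the 1st of June 2020:
+.IP
+.nf
+\f[C]
+rclone lsl --default-time 2020-06-01 remote:path
+\f[R]
+.fi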
.SS --disable FEATURE,FEATURE,...
.PP
This disables a comma separated list of optional features.
@@ -11970,10 +12183,30 @@ To see a list of which features can be disabled use:
\f[R]
.fi
.PP
+The features a remote has can be seen in JSON format with:
+.IP
+.nf
+\f[C]
+rclone backend features remote:
+\f[R]
+.fi
+.PP
See the overview features (https://rclone.org/overview/#features) and
optional features (https://rclone.org/overview/#optional-features) to
get an idea of which feature does what.
.PP
+Note that some features can be forced to \f[C]true\f[R], if they are
+\f[C]true\f[R]/\f[C]false\f[R] feature flags, by prefixing them with
+\f[C]!\f[R].
+For example the \f[C]CaseInsensitive\f[R] feature can be forced to
+\f[C]false\f[R] with \f[C]--disable CaseInsensitive\f[R] and forced to
+\f[C]true\f[R] with \f[C]--disable \[aq]!CaseInsensitive\[aq]\f[R].
+In general it isn\[aq]t a good idea to do this, but it may be useful
+in extremis.
+.PP
+(Note that \f[C]!\f[R] is a special character in some shells, so you
+will need to escape it with single quotes or a backslash on unix-like
+platforms.)
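+.PP
+For example, forcing \f[C]CaseInsensitive\f[R] to \f[C]true\f[R] from
+a unix shell (paths illustrative):
+.IP
+.nf
+\f[C]
+rclone copy --disable \[aq]!CaseInsensitive\[aq] remote:path /tmp/copy
+\f[R]
+.fi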
+.PP
This flag can be useful for debugging and in exceptional circumstances
(e.g.
Google Drive limiting the total volume of Server Side Copies to 100
@@ -12223,6 +12456,55 @@ well as modification.
This can be useful as an additional layer of protection for immutable or
append-only data sets (notably backup archives), where modification
implies corruption and should not be propagated.
+.SS --inplace
+.PP
+The \f[C]--inplace\f[R] flag changes the behaviour of rclone when
+uploading files to some backends (backends with the
+\f[C]PartialUploads\f[R] feature flag set) such as:
+.IP \[bu] 2
+local
+.IP \[bu] 2
+ftp
+.IP \[bu] 2
+sftp
+.PP
+Without \f[C]--inplace\f[R] (the default) rclone will first upload to
+a temporary file with an extension like this, where \f[C]XXXXXX\f[R]
+represents a random string:
+.IP
+.nf
+\f[C]
+original-file-name.XXXXXX.partial
+\f[R]
+.fi
+.PP
+(rclone will make sure the final name is no longer than 100 characters
+by truncating the \f[C]original-file-name\f[R] part if necessary).
+.PP
+When the upload is complete, rclone will rename the \f[C].partial\f[R]
+file to the correct name, overwriting any existing file at that point.
+If the upload fails then the \f[C].partial\f[R] file will be deleted.
+.PP
+This prevents other users of the backend from seeing partially uploaded
+files in their new names and prevents overwriting the old file until the
+new one is completely uploaded.
+.PP
+If the \f[C]--inplace\f[R] flag is supplied, rclone will upload directly
+to the final name without creating a \f[C].partial\f[R] file.
+.PP
+This means that an incomplete file will be visible in the directory
+listings while the upload is in progress and any existing files will be
+overwritten as soon as the upload starts.
+If the transfer fails then the file will be deleted, which can mean
+losing the existing file that has already been overwritten.
+.PP
+Note that on the local file system, if you don\[aq]t use
+\f[C]--inplace\f[R], hard links (Unix only) will be broken.
+And if you do use \f[C]--inplace\f[R], you won\[aq]t be able to update
+in-use executables.
+.PP
+Note also that versions of rclone prior to v1.63.0 behave as if the
+\f[C]--inplace\f[R] flag is always supplied.
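+.PP
+As an illustration, assuming an sftp remote named \f[C]server:\f[R],
+an in-place update of a single file might look like:
+.IP
+.nf
+\f[C]
+rclone copyto --inplace file.bin server:backup/file.bin
+\f[R]
+.fi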
.SS -i, --interactive
.PP
This flag can be used to tell rclone that you wish a manual confirmation
@@ -12447,6 +12729,26 @@ if you are reading and writing to an OS X filing system this will be
\f[C]1s\f[R] by default.
.PP
This command line flag allows you to override that computed default.
+.SS --multi-thread-write-buffer-size=SIZE
+.PP
+When downloading with multiple threads, rclone will buffer SIZE bytes in
+memory before writing to disk for each thread.
+.PP
+This can improve performance if the underlying filesystem does not deal
+well with a lot of small writes in different positions of the file, so
+if you see downloads being limited by disk write speed, you might want
+to experiment with different values.
+Especially for magnetic drives and remote file systems a higher value
+can be useful.
+.PP
+Nevertheless, the default of \f[C]128k\f[R] should be fine for almost
+all use cases, so before changing it ensure that the network is not
+really your bottleneck.
+.PP
+As a final hint, size is not the only factor: block size (or similar
+concept) can have an impact.
+In one case, we observed that exact multiples of 16k performed much
+better than other values.
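+.PP
+For example, to experiment with a larger buffer that is an exact
+multiple of 16k when downloading to a magnetic drive (paths
+illustrative):
+.IP
+.nf
+\f[C]
+rclone copy --multi-thread-write-buffer-size 256k remote:path /mnt/disk
+\f[R]
+.fi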
.SS --multi-thread-cutoff=SIZE
.PP
When downloading files to the local backend above this size, rclone will
@@ -12894,6 +13196,13 @@ So let\[aq]s say we had \f[C]--suffix -2019-01-01\f[R], without the flag
and with the flag it would be backed up to
\f[C]file-2019-01-01.txt\f[R].
This can be helpful to make sure the suffixed files can still be opened.
+.PP
+If a file has two (or more) extensions and the second (or subsequent)
+extension is recognised as a valid mime type, then the suffix will go
+before that extension.
+So \f[C]file.tar.gz\f[R] would be backed up to
+\f[C]file-2019-01-01.tar.gz\f[R] whereas \f[C]file.badextension.gz\f[R]
+would be backed up to \f[C]file.badextension-2019-01-01.gz\f[R].
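+.PP
+For example, to keep dated backups of changed destination files that
+can still be opened:
+.IP
+.nf
+\f[C]
+rclone sync --suffix -2019-01-01 --suffix-keep-extension /path/to/src remote:dst
+\f[R]
+.fi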
.SS --syslog
.PP
On capable OSes (not Windows or Plan9) send all log output to syslog.
@@ -14120,7 +14429,7 @@ Which will match a directory called \f[C]start\f[R] with a file called
characters.
.PP
Note that you can use \f[C]-vv --dump filters\f[R] to show the filter
-patterns in regexp format - rclone implements the glob patters by
+patterns in regexp format - rclone implements the glob patterns by
transforming them into regular expressions.
.SS Filter pattern examples
.PP
@@ -15887,7 +16196,7 @@ See the config dump (https://rclone.org/commands/rclone_config_dump/)
command for more information on the above.
.PP
\f[B]Authentication is required for this call.\f[R]
-.SS config/listremotes: Lists the remotes in the config file.
+.SS config/listremotes: Lists the remotes in the config file and defined in environment variables.
.PP
Returns - remotes - array of remote names
.PP
@@ -16578,13 +16887,13 @@ for more information on the above.
This takes the following parameters:
.IP \[bu] 2
srcFs - a remote name string e.g.
-\[dq]drive:\[dq] for the source
+\[dq]drive:\[dq] for the source, \[dq]/\[dq] for local filesystem
.IP \[bu] 2
srcRemote - a path within that remote e.g.
\[dq]file.txt\[dq] for the source
.IP \[bu] 2
dstFs - a remote name string e.g.
-\[dq]drive2:\[dq] for the destination
+\[dq]drive2:\[dq] for the destination, \[dq]/\[dq] for local filesystem
.IP \[bu] 2
dstRemote - a path within that remote e.g.
\[dq]file2.txt\[dq] for the destination
@@ -16820,13 +17129,13 @@ more information on the above.
This takes the following parameters:
.IP \[bu] 2
srcFs - a remote name string e.g.
-\[dq]drive:\[dq] for the source
+\[dq]drive:\[dq] for the source, \[dq]/\[dq] for local filesystem
.IP \[bu] 2
srcRemote - a path within that remote e.g.
\[dq]file.txt\[dq] for the source
.IP \[bu] 2
dstFs - a remote name string e.g.
-\[dq]drive2:\[dq] for the destination
+\[dq]drive2:\[dq] for the destination, \[dq]/\[dq] for local filesystem
.IP \[bu] 2
dstRemote - a path within that remote e.g.
\[dq]file2.txt\[dq] for the destination
@@ -18105,6 +18414,21 @@ T}@T{
-
T}
T{
+PikPak
+T}@T{
+MD5
+T}@T{
+R
+T}@T{
+No
+T}@T{
+No
+T}@T{
+R
+T}@T{
+-
+T}
+T{
premiumize.me
T}@T{
-
@@ -18325,10 +18649,11 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.
\f[C]md5sum\f[R] or \f[C]sha1sum\f[R] as well as \f[C]echo\f[R] are in
the remote\[aq]s PATH.
.PP
-\[S3] WebDAV supports hashes when used with Owncloud and Nextcloud only.
+\[S3] WebDAV supports hashes when used with Fastmail Files, Owncloud
+and Nextcloud only.
.PP
-\[u2074] WebDAV supports modtimes when used with Owncloud and Nextcloud
-only.
+\[u2074] WebDAV supports modtimes when used with Fastmail Files,
+Owncloud and Nextcloud only.
.PP
\[u2075]
QuickXorHash (https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash)
@@ -19390,7 +19715,7 @@ No
T}@T{
No
T}@T{
-Yes
+No
T}@T{
No
T}@T{
@@ -19882,6 +20207,29 @@ T}@T{
Yes
T}
T{
+PikPak
+T}@T{
+Yes
+T}@T{
+Yes
+T}@T{
+Yes
+T}@T{
+Yes
+T}@T{
+Yes
+T}@T{
+No
+T}@T{
+No
+T}@T{
+Yes
+T}@T{
+Yes
+T}@T{
+Yes
+T}
+T{
premiumize.me
T}@T{
Yes
@@ -20303,166 +20651,169 @@ These flags are available for every command.
.IP
.nf
\f[C]
- --ask-password Allow prompt for password for encrypted configuration (default true)
- --auto-confirm If enabled, do not request console confirmation
- --backup-dir string Make backups into hierarchy based in DIR
- --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
- --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
- --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
- --ca-cert stringArray CA certificate used to verify servers
- --cache-dir string Directory rclone will use for caching (default \[dq]$HOME/.cache/rclone\[dq])
- --check-first Do all the checks before starting transfers
- --checkers int Number of checkers to run in parallel (default 8)
- -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
- --client-cert string Client SSL certificate (PEM) for mutual TLS auth
- --client-key string Client SSL private key (PEM) for mutual TLS auth
- --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default \[dq]AUTO\[dq])
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
- --config string Config file (default \[dq]$HOME/.config/rclone/rclone.conf\[dq])
- --contimeout Duration Connect timeout (default 1m0s)
- --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
- --cpuprofile string Write cpu profile to file
- --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default \[dq]HARD\[dq])
- --delete-after When synchronizing, delete files on destination after transferring (default)
- --delete-before When synchronizing, delete files on destination before transferring
- --delete-during When synchronizing, delete files during transfer
- --delete-excluded Delete files on dest excluded from sync
- --disable string Disable a comma separated list of features (use --disable help to see a list)
- --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
- --disable-http2 Disable HTTP/2 in the global transport
- -n, --dry-run Do a trial run with no permanent changes
- --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
- --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
- --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
- --dump-headers Dump HTTP headers - may contain sensitive info
- --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
- --fast-list Use recursive list if available; uses more memory but fewer transactions
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
- --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
- --header stringArray Set HTTP header for all transactions
- --header-download stringArray Set HTTP header for download transactions
- --header-upload stringArray Set HTTP header for upload transactions
- --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
- --ignore-case Ignore case in filters (case insensitive)
- --ignore-case-sync Ignore case when synchronizing
- --ignore-checksum Skip post copy check of checksums
- --ignore-errors Delete even if there are I/O errors
- --ignore-existing Skip all files that exist on destination
- --ignore-size Ignore size when skipping use mod-time or checksum
- -I, --ignore-times Don\[aq]t skip files that match size and time - transfer all files
- --immutable Do not modify files, fail if existing files have been modified
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- -i, --interactive Enable interactive mode
- --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
- --log-file string Log everything to this file
- --log-format string Comma separated list of log format options (default \[dq]date,time\[dq])
- --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default \[dq]NOTICE\[dq])
- --log-systemd Activate systemd integration for the logger
- --low-level-retries int Number of low level retries to do (default 10)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
- --max-delete int When synchronizing, limit the number of deletes (default -1)
- --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
- --max-transfer SizeSuffix Maximum size of data to transfer (default off)
- --memprofile string Write memory profile to file
- -M, --metadata If set, preserve metadata when copying objects
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --metadata-set stringArray Add metadata key=value when uploading
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
- --modify-window Duration Max time diff to be considered the same (default 1ns)
- --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
- --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
- --no-check-certificate Do not verify the server SSL certificate (insecure)
- --no-check-dest Don\[aq]t check the destination, copy regardless
- --no-console Hide console window (supported on Windows only)
- --no-gzip-encoding Don\[aq]t set Accept-Encoding: gzip
- --no-traverse Don\[aq]t traverse destination file system on copy
- --no-unicode-normalization Don\[aq]t normalize unicode characters in filenames
- --no-update-modtime Don\[aq]t update destination mod-time if files identical
- --order-by string Instructions on how to order the transfers, e.g. \[aq]size,descending\[aq]
- --password-command SpaceSepList Command for supplying password for encrypted configuration
- -P, --progress Show progress during transfer
- --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
- -q, --quiet Print as little stuff as possible
- --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
- --rc-allow-origin string Set the allowed origin for CORS
- --rc-baseurl string Prefix for URLs - leave blank for root
- --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
- --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
- --rc-files string Path to local files to serve on the HTTP server
- --rc-htpasswd string A htpasswd file - if not provided no authentication is done
- --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
- --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
- --rc-key string TLS PEM Private key
- --rc-max-header-bytes int Maximum size of request header (default 4096)
- --rc-min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq])
- --rc-no-auth Don\[aq]t require auth for certain methods
- --rc-pass string Password for authentication
- --rc-realm string Realm for authentication
- --rc-salt string Password hashing salt (default \[dq]dlPL2MqE\[dq])
- --rc-serve Enable the serving of remote objects
- --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --rc-template string User-specified template
- --rc-user string User name for authentication
- --rc-web-fetch-url string URL to fetch the releases for webgui (default \[dq]https://api.github.com/repos/rclone/rclone-webui-react/releases/latest\[dq])
- --rc-web-gui Launch WebGUI on localhost
- --rc-web-gui-force-update Force update to latest version of web gui
- --rc-web-gui-no-open-browser Don\[aq]t open the browser automatically
- --rc-web-gui-update Check and update to latest version of web gui
- --refresh-times Refresh the modtime of remote files
- --retries int Retry operations this many times if they fail (default 3)
- --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
- --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
- --size-only Skip based on size only, not mod-time or checksum
- --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
- --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
- --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default \[dq]INFO\[dq])
- --stats-one-line Make the stats fit on one line
- --stats-one-line-date Enable --stats-one-line and add current date/time prefix
- --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\[dq]), see https://golang.org/pkg/time/#Time.Format
- --stats-unit string Show data rate in stats as either \[aq]bits\[aq] or \[aq]bytes\[aq] per second (default \[dq]bytes\[dq])
- --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
- --suffix string Suffix to add to changed files
- --suffix-keep-extension Preserve the extension when using --suffix
- --syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default \[dq]DAEMON\[dq])
- --temp-dir string Directory rclone will use for temporary files (default \[dq]/tmp\[dq])
- --timeout Duration IO idle timeout (default 5m0s)
- --tpslimit float Limit HTTP transactions per second to this
- --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
- --track-renames When synchronizing, track file renames and do a server-side move if possible
- --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default \[dq]hash\[dq])
- --transfers int Number of file transfers to run in parallel (default 4)
- -u, --update Skip files that are newer on the destination
- --use-cookies Enable session cookiejar
- --use-json-log Use json log format
- --use-mmap Use mmap allocator (see docs)
- --use-server-modtime Use server modified time instead of object metadata
- --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.62.0\[dq])
- -v, --verbose count Print lots more stuff (repeat for more)
+ --ask-password Allow prompt for password for encrypted configuration (default true)
+ --auto-confirm If enabled, do not request console confirmation
+ --backup-dir string Make backups into hierarchy based in DIR
+ --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name
+ --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
+ --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+ --ca-cert stringArray CA certificate used to verify servers
+ --cache-dir string Directory rclone will use for caching (default \[dq]$HOME/.cache/rclone\[dq])
+ --check-first Do all the checks before starting transfers
+ --checkers int Number of checkers to run in parallel (default 8)
+ -c, --checksum Skip based on checksum (if available) & size, not mod-time & size
+ --client-cert string Client SSL certificate (PEM) for mutual TLS auth
+ --client-key string Client SSL private key (PEM) for mutual TLS auth
+ --color string When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default \[dq]AUTO\[dq])
+ --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ --config string Config file (default \[dq]$HOME/.config/rclone/rclone.conf\[dq])
+ --contimeout Duration Connect timeout (default 1m0s)
+ --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
+ --cpuprofile string Write cpu profile to file
+ --cutoff-mode string Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default \[dq]HARD\[dq])
+ --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
+ --delete-after When synchronizing, delete files on destination after transferring (default)
+ --delete-before When synchronizing, delete files on destination before transferring
+ --delete-during When synchronizing, delete files during transfer
+ --delete-excluded Delete files on dest excluded from sync
+ --disable string Disable a comma separated list of features (use --disable help to see a list)
+ --disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
+ --disable-http2 Disable HTTP/2 in the global transport
+ -n, --dry-run Do a trial run with no permanent changes
+ --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
+ --dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
+ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info
+ --dump-headers Dump HTTP headers - may contain sensitive info
+ --error-on-no-transfer Sets exit code 9 if no files are transferred, useful in scripts
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s)
+ --fast-list Use recursive list if available; uses more memory but fewer transactions
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
+ --fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
+ --header stringArray Set HTTP header for all transactions
+ --header-download stringArray Set HTTP header for download transactions
+ --header-upload stringArray Set HTTP header for upload transactions
+ --human-readable Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi
+ --ignore-case Ignore case in filters (case insensitive)
+ --ignore-case-sync Ignore case when synchronizing
+ --ignore-checksum Skip post copy check of checksums
+ --ignore-errors Delete even if there are I/O errors
+ --ignore-existing Skip all files that exist on destination
+ --ignore-size Ignore size when skipping use mod-time or checksum
+ -I, --ignore-times Don\[aq]t skip files that match size and time - transfer all files
+ --immutable Do not modify files, fail if existing files have been modified
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --inplace Download directly to destination file instead of atomic download to temp/rename
+ -i, --interactive Enable interactive mode
+ --kv-lock-time Duration Maximum time to keep key-value database locked by process (default 1s)
+ --log-file string Log everything to this file
+ --log-format string Comma separated list of log format options (default \[dq]date,time\[dq])
+ --log-level string Log level DEBUG|INFO|NOTICE|ERROR (default \[dq]NOTICE\[dq])
+ --log-systemd Activate systemd integration for the logger
+ --low-level-retries int Number of low level retries to do (default 10)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
+ --max-delete int When synchronizing, limit the number of deletes (default -1)
+ --max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-duration Duration Maximum duration rclone will transfer data for (default 0s)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
+ --max-transfer SizeSuffix Maximum size of data to transfer (default off)
+ --memprofile string Write memory profile to file
+ -M, --metadata If set, preserve metadata when copying objects
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --metadata-set stringArray Add metadata key=value when uploading
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+ --modify-window Duration Max time diff to be considered the same (default 1ns)
+ --multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 250Mi)
+ --multi-thread-streams int Max number of streams to use for multi-thread downloads (default 4)
+ --multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
+ --no-check-certificate Do not verify the server SSL certificate (insecure)
+ --no-check-dest Don\[aq]t check the destination, copy regardless
+ --no-console Hide console window (supported on Windows only)
+ --no-gzip-encoding Don\[aq]t set Accept-Encoding: gzip
+ --no-traverse Don\[aq]t traverse destination file system on copy
+ --no-unicode-normalization Don\[aq]t normalize unicode characters in filenames
+ --no-update-modtime Don\[aq]t update destination mod-time if files identical
+ --order-by string Instructions on how to order the transfers, e.g. \[aq]size,descending\[aq]
+ --password-command SpaceSepList Command for supplying password for encrypted configuration
+ -P, --progress Show progress during transfer
+ --progress-terminal-title Show progress on the terminal title (requires -P/--progress)
+ -q, --quiet Print as little stuff as possible
+ --rc Enable the remote control server
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-allow-origin string Set the allowed origin for CORS
+ --rc-baseurl string Prefix for URLs - leave blank for root
+ --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --rc-client-ca string Client certificate authority to verify clients with
+ --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-files string Path to local files to serve on the HTTP server
+ --rc-htpasswd string A htpasswd file - if not provided no authentication is done
+ --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
+ --rc-job-expire-interval Duration Interval to check for expired async jobs (default 10s)
+ --rc-key string TLS PEM Private key
+ --rc-max-header-bytes int Maximum size of request header (default 4096)
+ --rc-min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq])
+ --rc-no-auth Don\[aq]t require auth for certain methods
+ --rc-pass string Password for authentication
+ --rc-realm string Realm for authentication
+ --rc-salt string Password hashing salt (default \[dq]dlPL2MqE\[dq])
+ --rc-serve Enable the serving of remote objects
+ --rc-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --rc-template string User-specified template
+ --rc-user string User name for authentication
+ --rc-web-fetch-url string URL to fetch the releases for webgui (default \[dq]https://api.github.com/repos/rclone/rclone-webui-react/releases/latest\[dq])
+ --rc-web-gui Launch WebGUI on localhost
+ --rc-web-gui-force-update Force update to latest version of web gui
+ --rc-web-gui-no-open-browser Don\[aq]t open the browser automatically
+ --rc-web-gui-update Check and update to latest version of web gui
+ --refresh-times Refresh the modtime of remote files
+ --retries int Retry operations this many times if they fail (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
+ --size-only Skip based on size only, not mod-time or checksum
+ --stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
+ --stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
+ --stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default \[dq]INFO\[dq])
+ --stats-one-line Make the stats fit on one line
+ --stats-one-line-date Enable --stats-one-line and add current date/time prefix
+ --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\[dq]), see https://golang.org/pkg/time/#Time.Format
+ --stats-unit string Show data rate in stats as either \[aq]bits\[aq] or \[aq]bytes\[aq] per second (default \[dq]bytes\[dq])
+ --streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+ --suffix string Suffix to add to changed files
+ --suffix-keep-extension Preserve the extension when using --suffix
+ --syslog Use Syslog for logging
+ --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default \[dq]DAEMON\[dq])
+ --temp-dir string Directory rclone will use for temporary files (default \[dq]/tmp\[dq])
+ --timeout Duration IO idle timeout (default 5m0s)
+ --tpslimit float Limit HTTP transactions per second to this
+ --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
+ --track-renames When synchronizing, track file renames and do a server-side move if possible
+ --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default \[dq]hash\[dq])
+ --transfers int Number of file transfers to run in parallel (default 4)
+ -u, --update Skip files that are newer on the destination
+ --use-cookies Enable session cookiejar
+ --use-json-log Use json log format
+ --use-mmap Use mmap allocator (see docs)
+ --use-server-modtime Use server modified time instead of object metadata
+ --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.63.0\[dq])
+ -v, --verbose count Print lots more stuff (repeat for more)
\f[R]
.fi
.SS Backend Flags
@@ -20472,554 +20823,581 @@ They control the backends and may be set in the config file.
.IP
.nf
\f[C]
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
- --alias-remote string Remote or path to alias
- --azureblob-access-tier string Access tier of blob: hot, cool or archive
- --azureblob-account string Azure Storage Account Name
- --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
- --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
- --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
- --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
- --azureblob-client-id string The ID of the client in use
- --azureblob-client-secret string One of the service principal\[aq]s client secrets
- --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
- --azureblob-disable-checksum Don\[aq]t store MD5 checksum with object metadata
- --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
- --azureblob-endpoint string Endpoint for the service
- --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
- --azureblob-key string Storage Account Shared Key
- --azureblob-list-chunk int Size of blob list (default 5000)
- --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --azureblob-msi-client-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
- --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
- --azureblob-no-check-container If set, don\[aq]t attempt to check the container exists or create it
- --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
- --azureblob-password string The user\[aq]s password (obscured)
- --azureblob-public-access string Public access level of a container: blob or container
- --azureblob-sas-url string SAS URL for container level access only
- --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
- --azureblob-tenant string ID of the service principal\[aq]s tenant. Also called its directory ID
- --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
- --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
- --azureblob-use-emulator Uses local storage emulator if provided as \[aq]true\[aq]
- --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
- --azureblob-username string User name (usually an email address)
- --b2-account string Account ID or Application Key ID
- --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
- --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
- --b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
- --b2-download-url string Custom endpoint for downloads
- --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --b2-endpoint string Endpoint for the service
- --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
- --b2-key string Application Key
- --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
- --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --b2-version-at Time Show file versions as they were at the specified time (default off)
- --b2-versions Include old versions in directory listings
- --box-access-token string Box App Primary Access Token
- --box-auth-url string Auth server URL
- --box-box-config-file string Box App config.json location
- --box-box-sub-type string (default \[dq]user\[dq])
- --box-client-id string OAuth Client Id
- --box-client-secret string OAuth Client Secret
- --box-commit-retries int Max number of times to try committing a multipart file (default 100)
- --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
- --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
- --box-owned-by string Only show items owned by the login (email address) passed in
- --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
- --box-token string OAuth Access Token as a JSON blob
- --box-token-url string Token server url
- --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
- --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
- --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
- --cache-chunk-path string Directory to cache chunk files (default \[dq]$HOME/.cache/rclone/cache-backend\[dq])
- --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
- --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
- --cache-db-path string Directory to store file structure metadata DB (default \[dq]$HOME/.cache/rclone/cache-backend\[dq])
- --cache-db-purge Clear all the cached data for this remote on start
- --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
- --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
- --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
- --cache-plex-password string The password of the Plex user (obscured)
- --cache-plex-url string The URL of the Plex server
- --cache-plex-username string The username of the Plex user
- --cache-read-retries int How many times to retry a read from a cache storage (default 10)
- --cache-remote string Remote to cache
- --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
- --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
- --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
- --cache-workers int How many workers should run in parallel to download chunks (default 4)
- --cache-writes Cache file data on writes through the FS
- --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
- --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
- --chunker-hash-type string Choose how chunker handles hash sums (default \[dq]md5\[dq])
- --chunker-remote string Remote to chunk/unchunk
- --combine-upstreams SpaceSepList Upstreams for combining
- --compress-level int GZIP compression level (-2 to 9) (default -1)
- --compress-mode string Compression mode (default \[dq]gzip\[dq])
- --compress-ram-cache-limit SizeSuffix Some remotes don\[aq]t allow the upload of files with unknown size (default 20Mi)
- --compress-remote string Remote to compress
- -L, --copy-links Follow symlinks and copy the pointed to item
- --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
- --crypt-filename-encoding string How to encode the encrypted filename to text string (default \[dq]base32\[dq])
- --crypt-filename-encryption string How to encrypt the filenames (default \[dq]standard\[dq])
- --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
- --crypt-password string Password or pass phrase for encryption (obscured)
- --crypt-password2 string Password or pass phrase for salt (obscured)
- --crypt-remote string Remote to encrypt/decrypt
- --crypt-server-side-across-configs Allow server-side operations (e.g. copy) to work across different crypt configs
- --crypt-show-mapping For all files listed show how the names encrypt
- --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
- --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
- --drive-auth-owner-only Only consider files owned by the authenticated user
- --drive-auth-url string Auth server URL
- --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
- --drive-client-id string Google Application Client Id
- --drive-client-secret string OAuth Client Secret
- --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
- --drive-disable-http2 Disable drive using http2 (default true)
- --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
- --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default \[dq]docx,xlsx,pptx,svg\[dq])
- --drive-formats string Deprecated: See export_formats
- --drive-impersonate string Impersonate this user when using a service account
- --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
- --drive-keep-revision-forever Keep new head revision of each file forever
- --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
- --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
- --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
- --drive-resource-key string Resource key for accessing a link-shared file
- --drive-root-folder-id string ID of the root folder
- --drive-scope string Scope that rclone should use when requesting access from drive
- --drive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different drive configs
- --drive-service-account-credentials string Service Account Credentials JSON blob
- --drive-service-account-file string Service Account Credentials JSON file path
- --drive-shared-with-me Only show files that are shared with me
- --drive-size-as-quota Show sizes as storage quota usage, not actual size
- --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
- --drive-skip-dangling-shortcuts If set skip dangling shortcut files
- --drive-skip-gdocs Skip google documents in all listings
- --drive-skip-shortcuts If set skip shortcut files
- --drive-starred-only Only show files that are starred
- --drive-stop-on-download-limit Make download limit errors be fatal
- --drive-stop-on-upload-limit Make upload limit errors be fatal
- --drive-team-drive string ID of the Shared Drive (Team Drive)
- --drive-token string OAuth Access Token as a JSON blob
- --drive-token-url string Token server url
- --drive-trashed-only Only show files that are in the trash
- --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
- --drive-use-created-date Use file created date instead of modified date
- --drive-use-shared-date Use date file was shared instead of modified date
- --drive-use-trash Send files to the trash instead of deleting permanently (default true)
- --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download (default off)
- --dropbox-auth-url string Auth server URL
- --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
- --dropbox-batch-mode string Upload file batching sync|async|off (default \[dq]sync\[dq])
- --dropbox-batch-size int Max number of files in upload batch
- --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
- --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
- --dropbox-client-id string OAuth Client Id
- --dropbox-client-secret string OAuth Client Secret
- --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
- --dropbox-impersonate string Impersonate this user when using a business account
- --dropbox-shared-files Instructs rclone to work on individual shared files
- --dropbox-shared-folders Instructs rclone to work on shared folders
- --dropbox-token string OAuth Access Token as a JSON blob
- --dropbox-token-url string Token server url
- --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
- --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
- --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
- --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
- --fichier-shared-folder string If you want to download a shared folder, add this parameter
- --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --filefabric-permanent-token string Permanent Authentication Token
- --filefabric-root-folder-id string ID of the root folder
- --filefabric-token string Session Token
- --filefabric-token-expiry string Token expiry time
- --filefabric-url string URL of the Enterprise File Fabric to connect to
- --filefabric-version string Version read from the file fabric
- --ftp-ask-password Allow asking for FTP password when needed
- --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
- --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
- --ftp-disable-epsv Disable using EPSV even if server advertises support
- --ftp-disable-mlsd Disable using MLSD even if server advertises support
- --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
- --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
- --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
- --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
- --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
- --ftp-host string FTP host to connect to
- --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --ftp-no-check-certificate Do not verify the TLS certificate of the server
- --ftp-pass string FTP password (obscured)
- --ftp-port int FTP port number (default 21)
- --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
- --ftp-tls Use Implicit FTPS (FTP over TLS)
- --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
- --ftp-user string FTP username (default \[dq]$USER\[dq])
- --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
- --gcs-anonymous Access public buckets and objects without credentials
- --gcs-auth-url string Auth server URL
- --gcs-bucket-acl string Access Control List for new buckets
- --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
- --gcs-client-id string OAuth Client Id
- --gcs-client-secret string OAuth Client Secret
- --gcs-decompress If set this will decompress gzip encoded objects
- --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gcs-endpoint string Endpoint for the service
- --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
- --gcs-location string Location for the newly created buckets
- --gcs-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
- --gcs-object-acl string Access Control List for new objects
- --gcs-project-number string Project number
- --gcs-service-account-file string Service Account Credentials JSON file path
- --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
- --gcs-token string OAuth Access Token as a JSON blob
- --gcs-token-url string Token server url
- --gphotos-auth-url string Auth server URL
- --gphotos-client-id string OAuth Client Id
- --gphotos-client-secret string OAuth Client Secret
- --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
- --gphotos-include-archived Also view and download archived media
- --gphotos-read-only Set to make the Google Photos backend read only
- --gphotos-read-size Set to read the size of media items
- --gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
- --gphotos-token string OAuth Access Token as a JSON blob
- --gphotos-token-url string Token server url
- --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
- --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
- --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
- --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
- --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
- --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
- --hdfs-namenode string Hadoop name node and port
- --hdfs-service-principal-name string Kerberos service principal name for the namenode
- --hdfs-username string Hadoop user name
- --hidrive-auth-url string Auth server URL
- --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
- --hidrive-client-id string OAuth Client Id
- --hidrive-client-secret string OAuth Client Secret
- --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
- --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --hidrive-endpoint string Endpoint for the service (default \[dq]https://api.hidrive.strato.com/2.1\[dq])
- --hidrive-root-prefix string The root/parent folder for all paths (default \[dq]/\[dq])
- --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default \[dq]rw\[dq])
- --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default \[dq]user\[dq])
- --hidrive-token string OAuth Access Token as a JSON blob
- --hidrive-token-url string Token server url
- --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
- --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
- --http-headers CommaSepList Set HTTP headers for all transactions
- --http-no-head Don\[aq]t use HEAD requests
- --http-no-slash Set this if the site doesn\[aq]t end directories with /
- --http-url string URL of HTTP host to connect to
- --internetarchive-access-key-id string IAS3 Access Key
- --internetarchive-disable-checksum Don\[aq]t ask the server to test against MD5 checksum calculated by rclone (default true)
- --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
- --internetarchive-endpoint string IAS3 Endpoint (default \[dq]https://s3.us.archive.org\[dq])
- --internetarchive-front-endpoint string Host of InternetArchive Frontend (default \[dq]https://archive.org\[dq])
- --internetarchive-secret-access-key string IAS3 Secret Key (password)
- --internetarchive-wait-archive Duration Timeout for waiting for the server\[aq]s processing tasks (specifically archive and book_op) to finish (default 0s)
- --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
- --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
- --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
- --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
- --jottacloud-trashed-only Only show files that are in the trash
- --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
- --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --koofr-endpoint string The Koofr API endpoint to use
- --koofr-mountid string Mount ID of the mount to use
- --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
- --koofr-provider string Choose your storage provider
- --koofr-setmtime Does the backend support setting modification time (default true)
- --koofr-user string Your user name
- -l, --links Translate symlinks to/from regular files with a \[aq].rclonelink\[aq] extension
- --local-case-insensitive Force the filesystem to report itself as case insensitive
- --local-case-sensitive Force the filesystem to report itself as case sensitive
- --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
- --local-no-check-updated Don\[aq]t check to see if the files change during upload
- --local-no-preallocate Disable preallocation of disk space for transferred files
- --local-no-set-modtime Disable setting modtime
- --local-no-sparse Disable sparse files for multi-thread downloads
- --local-nounc Disable UNC (long path names) conversion on Windows
- --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
- --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
- --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
- --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --mailru-pass string Password (obscured)
- --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
- --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default \[dq]*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf\[dq])
- --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
- --mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
- --mailru-user string User name (usually email)
- --mega-debug Output more debug from Mega
- --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --mega-hard-delete Delete files permanently rather than putting them into the trash
- --mega-pass string Password (obscured)
- --mega-use-https Use HTTPS for transfers
- --mega-user string User name
- --netstorage-account string Set the NetStorage account name
- --netstorage-host string Domain+path of NetStorage host to connect to
- --netstorage-protocol string Select between HTTP or HTTPS protocol (default \[dq]https\[dq])
- --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
- -x, --one-file-system Don\[aq]t cross filesystem boundaries (unix/macOS only)
- --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
- --onedrive-auth-url string Auth server URL
- --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
- --onedrive-client-id string OAuth Client Id
- --onedrive-client-secret string OAuth Client Secret
- --onedrive-drive-id string The ID of the drive to use
- --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
- --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
- --onedrive-hash-type string Specify the hash in use for the backend (default \[dq]auto\[dq])
- --onedrive-link-password string Set the password for links created by the link command
- --onedrive-link-scope string Set the scope of the links created by the link command (default \[dq]anonymous\[dq])
- --onedrive-link-type string Set the type of the links created by the link command (default \[dq]view\[dq])
- --onedrive-list-chunk int Size of listing chunk (default 1000)
- --onedrive-no-versions Remove all versions on modifying operations
- --onedrive-region string Choose national cloud region for OneDrive (default \[dq]global\[dq])
- --onedrive-root-folder-id string ID of the root folder
- --onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
- --onedrive-token string OAuth Access Token as a JSON blob
- --onedrive-token-url string Token server url
- --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --oos-compartment string Object storage compartment OCID
- --oos-config-file string Path to OCI config file (default \[dq]\[ti]/.oci/config\[dq])
- --oos-config-profile string Profile name inside the oci config file (default \[dq]Default\[dq])
- --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --oos-copy-timeout Duration Timeout for copy (default 1m0s)
- --oos-disable-checksum Don\[aq]t store MD5 checksum with object metadata
- --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --oos-endpoint string Endpoint for Object storage API
- --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --oos-namespace string Object storage namespace
- --oos-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
- --oos-provider string Choose your Auth Provider (default \[dq]env_auth\[dq])
- --oos-region string Object storage Region
- --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies \[dq]AES256\[dq] as the encryption algorithm
- --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- --oos-sse-customer-key-sha256 string If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
- --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the
- --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default \[dq]Standard\[dq])
- --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
- --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
- --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
- --opendrive-password string Password (obscured)
- --opendrive-username string Username
- --pcloud-auth-url string Auth server URL
- --pcloud-client-id string OAuth Client Id
- --pcloud-client-secret string OAuth Client Secret
- --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --pcloud-hostname string Hostname to connect to (default \[dq]api.pcloud.com\[dq])
- --pcloud-password string Your pcloud password (obscured)
- --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default \[dq]d0\[dq])
- --pcloud-token string OAuth Access Token as a JSON blob
- --pcloud-token-url string Token server url
- --pcloud-username string Your pcloud username
- --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
- --qingstor-access-key-id string QingStor Access Key ID
- --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
- --qingstor-connection-retries int Number of connection retries (default 3)
- --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
- --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
- --qingstor-env-auth Get QingStor credentials from runtime
- --qingstor-secret-access-key string QingStor Secret Access Key (password)
- --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
- --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --qingstor-zone string Zone to connect to
- --s3-access-key-id string AWS Access Key ID
- --s3-acl string Canned ACL used when creating buckets and storing or copying objects
- --s3-bucket-acl string Canned ACL used when creating buckets
- --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
- --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
- --s3-decompress If set this will decompress gzip encoded objects
- --s3-disable-checksum Don\[aq]t store MD5 checksum with object metadata
- --s3-disable-http2 Disable usage of http2 for S3 backends
- --s3-download-url string Custom endpoint for downloads
- --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --s3-endpoint string Endpoint for S3 API
- --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
- --s3-force-path-style If true use path style access if false use virtual hosted style (default true)
- --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
- --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
- --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
- --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
- --s3-location-constraint string Location constraint - must be set to match the Region
- --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
- --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
- --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
- --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
- --s3-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
- --s3-no-head If set, don\[aq]t HEAD uploaded objects to check integrity
- --s3-no-head-object If set, do not do HEAD before GET when getting objects
- --s3-no-system-metadata Suppress setting and reading of system metadata
- --s3-profile string Profile to use in the shared credentials file
- --s3-provider string Choose your S3 provider
- --s3-region string Region to connect to
- --s3-requester-pays Enables requester pays option when interacting with S3 bucket
- --s3-secret-access-key string AWS Secret Access Key (password)
- --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
- --s3-session-token string An AWS session token
- --s3-shared-credentials-file string Path to the shared credentials file
- --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
- --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
- --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
- --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
- --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
- --s3-storage-class string The storage class to use when storing new objects in S3
- --s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
- --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
- --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
- --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
- --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
- --s3-v2-auth If true use v2 authentication
- --s3-version-at Time Show file versions as they were at the specified time (default off)
- --s3-versions Include old versions in directory listings
- --seafile-2fa Two-factor authentication (\[aq]true\[aq] if the account has 2FA enabled)
- --seafile-create-library Should rclone create a library if it doesn\[aq]t exist
- --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
- --seafile-library string Name of the library
- --seafile-library-key string Library password (for encrypted libraries only) (obscured)
- --seafile-pass string Password (obscured)
- --seafile-url string URL of seafile host to connect to
- --seafile-user string User name (usually email address)
- --sftp-ask-password Allow asking for SFTP password when needed
- --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
- --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
- --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
- --sftp-disable-concurrent-reads If set don\[aq]t use concurrent reads
- --sftp-disable-concurrent-writes If set don\[aq]t use concurrent writes
- --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
- --sftp-host string SSH host to connect to
- --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
- --sftp-key-file string Path to PEM-encoded private key file
- --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
- --sftp-key-pem string Raw PEM-encoded private key
- --sftp-key-use-agent When set forces the usage of the ssh-agent
- --sftp-known-hosts-file string Optional path to known_hosts file
- --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
- --sftp-md5sum-command string The command used to read md5 hashes
- --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
- --sftp-path-override string Override path used by SSH shell commands
- --sftp-port int SSH port number (default 22)
- --sftp-pubkey-file string Optional path to public key file
- --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
- --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
- --sftp-set-modtime Set the modified time on the remote if set (default true)
- --sftp-sha1sum-command string The command used to read sha1 hashes
- --sftp-shell-type string The type of SSH shell on remote server, if any
- --sftp-skip-links Set to skip any symlinks and any other non regular files
- --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default \[dq]sftp\[dq])
- --sftp-use-fstat If set use fstat instead of stat
- --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
- --sftp-user string SSH username (default \[dq]$USER\[dq])
- --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
- --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --sharefile-endpoint string Endpoint for API calls
- --sharefile-root-folder-id string ID of the root folder
- --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
- --sia-api-password string Sia Daemon API Password (obscured)
- --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default \[dq]http://127.0.0.1:9980\[dq])
- --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
- --sia-user-agent string Siad User Agent (default \[dq]Sia-Agent\[dq])
- --skip-links Don\[aq]t warn about skipped symlinks
- --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
- --smb-domain string Domain name for NTLM authentication (default \[dq]WORKGROUP\[dq])
- --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
- --smb-hide-special-share Hide special shares (e.g. print$) which users aren\[aq]t supposed to access (default true)
- --smb-host string SMB server hostname to connect to
- --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
- --smb-pass string SMB password (obscured)
- --smb-port int SMB port number (default 445)
- --smb-spn string Service principal name
- --smb-user string SMB username (default \[dq]$USER\[dq])
- --storj-access-grant string Access grant
- --storj-api-key string API key
- --storj-passphrase string Encryption passphrase
- --storj-provider string Choose an authentication method (default \[dq]existing\[dq])
- --storj-satellite-address string Satellite address (default \[dq]us1.storj.io\[dq])
- --sugarsync-access-key-id string Sugarsync Access Key ID
- --sugarsync-app-id string Sugarsync App ID
- --sugarsync-authorization string Sugarsync authorization
- --sugarsync-authorization-expiry string Sugarsync authorization expiry
- --sugarsync-deleted-id string Sugarsync deleted folder id
- --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
- --sugarsync-hard-delete Permanently delete files if true
- --sugarsync-private-access-key string Sugarsync Private Access Key
- --sugarsync-refresh-token string Sugarsync refresh token
- --sugarsync-root-id string Sugarsync root id
- --sugarsync-user string Sugarsync user
- --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
- --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
- --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
- --swift-auth string Authentication URL for server (OS_AUTH_URL)
- --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
- --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
- --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
- --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
- --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
- --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default \[dq]public\[dq])
- --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
- --swift-key string API key or password (OS_PASSWORD)
- --swift-leave-parts-on-error If true avoid calling abort upload on a failure
- --swift-no-chunk Don\[aq]t chunk files during streaming upload
- --swift-no-large-objects Disable support for static and dynamic large objects
- --swift-region string Region name - optional (OS_REGION_NAME)
- --swift-storage-policy string The storage policy to use when creating a new container
- --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
- --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
- --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
- --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
- --swift-user string User name to log in (OS_USERNAME)
- --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
- --union-action-policy string Policy to choose upstream on ACTION category (default \[dq]epall\[dq])
- --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
- --union-create-policy string Policy to choose upstream on CREATE category (default \[dq]epmfs\[dq])
- --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
- --union-search-policy string Policy to choose upstream on SEARCH category (default \[dq]ff\[dq])
- --union-upstreams string List of space separated upstreams
- --uptobox-access-token string Your access token
- --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
- --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
- --webdav-bearer-token-command string Command to run to get a bearer token
- --webdav-encoding string The encoding for the backend
- --webdav-headers CommaSepList Set HTTP headers for all transactions
- --webdav-pass string Password (obscured)
- --webdav-url string URL of http host to connect to
- --webdav-user string User name
- --webdav-vendor string Name of the WebDAV site/service/software you are using
- --yandex-auth-url string Auth server URL
- --yandex-client-id string OAuth Client Id
- --yandex-client-secret string OAuth Client Secret
- --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
- --yandex-hard-delete Delete files permanently rather than putting them into the trash
- --yandex-token string OAuth Access Token as a JSON blob
- --yandex-token-url string Token server url
- --zoho-auth-url string Auth server URL
- --zoho-client-id string OAuth Client Id
- --zoho-client-secret string OAuth Client Secret
- --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
- --zoho-region string Zoho region to connect to
- --zoho-token string OAuth Access Token as a JSON blob
- --zoho-token-url string Token server url
+ --acd-auth-url string Auth server URL
+ --acd-client-id string OAuth Client Id
+ --acd-client-secret string OAuth Client Secret
+ --acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
+ --acd-token string OAuth Access Token as a JSON blob
+ --acd-token-url string Token server url
+ --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-remote string Remote or path to alias
+ --azureblob-access-tier string Access tier of blob: hot, cool or archive
+ --azureblob-account string Azure Storage Account Name
+ --azureblob-archive-tier-delete Delete archive tier blobs before overwriting
+ --azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
+ --azureblob-client-certificate-password string Password for the certificate file (optional) (obscured)
+ --azureblob-client-certificate-path string Path to a PEM or PKCS12 certificate file including the private key
+ --azureblob-client-id string The ID of the client in use
+ --azureblob-client-secret string One of the service principal\[aq]s client secrets
+ --azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --azureblob-disable-checksum Don\[aq]t store MD5 checksum with object metadata
+ --azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+ --azureblob-endpoint string Endpoint for the service
+ --azureblob-env-auth Read credentials from runtime (environment variables, CLI or MSI)
+ --azureblob-key string Storage Account Shared Key
+ --azureblob-list-chunk int Size of blob list (default 5000)
+ --azureblob-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --azureblob-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --azureblob-msi-client-id string Client ID of the user-assigned MSI to use, if any
+ --azureblob-msi-mi-res-id string Azure resource ID of the user-assigned MSI to use, if any
+ --azureblob-msi-object-id string Object ID of the user-assigned MSI to use, if any
+ --azureblob-no-check-container If set, don\[aq]t attempt to check the container exists or create it
+ --azureblob-no-head-object If set, do not do HEAD before GET when getting objects
+ --azureblob-password string The user\[aq]s password (obscured)
+ --azureblob-public-access string Public access level of a container: blob or container
+ --azureblob-sas-url string SAS URL for container level access only
+ --azureblob-service-principal-file string Path to file containing credentials for use with a service principal
+ --azureblob-tenant string ID of the service principal\[aq]s tenant. Also called its directory ID
+ --azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
+ --azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
+ --azureblob-use-emulator Uses local storage emulator if provided as \[aq]true\[aq]
+ --azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
+ --azureblob-username string User name (usually an email address)
+ --b2-account string Account ID or Application Key ID
+ --b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
+ --b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-disable-checksum Disable checksums for large (> upload cutoff) files
+ --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-url string Custom endpoint for downloads
+ --b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --b2-endpoint string Endpoint for the service
+ --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
+ --b2-key string Application Key
+ --b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
+ --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --b2-version-at Time Show file versions as they were at the specified time (default off)
+ --b2-versions Include old versions in directory listings
+ --box-access-token string Box App Primary Access Token
+ --box-auth-url string Auth server URL
+ --box-box-config-file string Box App config.json location
+ --box-box-sub-type string (default \[dq]user\[dq])
+ --box-client-id string OAuth Client Id
+ --box-client-secret string OAuth Client Secret
+ --box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+ --box-list-chunk int Size of listing chunk 1-1000 (default 1000)
+ --box-owned-by string Only show items owned by the login (email address) passed in
+ --box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
+ --box-token string OAuth Access Token as a JSON blob
+ --box-token-url string Token server url
+ --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+ --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
+ --cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming
+ --cache-chunk-path string Directory to cache chunk files (default \[dq]$HOME/.cache/rclone/cache-backend\[dq])
+ --cache-chunk-size SizeSuffix The size of a chunk (partial file data) (default 5Mi)
+ --cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk (default 10Gi)
+ --cache-db-path string Directory to store file structure metadata DB (default \[dq]$HOME/.cache/rclone/cache-backend\[dq])
+ --cache-db-purge Clear all the cached data for this remote on start
+ --cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
+ --cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
+ --cache-plex-password string The password of the Plex user (obscured)
+ --cache-plex-url string The URL of the Plex server
+ --cache-plex-username string The username of the Plex user
+ --cache-read-retries int How many times to retry a read from a cache storage (default 10)
+ --cache-remote string Remote to cache
+ --cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
+ --cache-tmp-upload-path string Directory to keep temporary files until they are uploaded
+ --cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
+ --cache-workers int How many workers should run in parallel to download chunks (default 4)
+ --cache-writes Cache file data on writes through the FS
+ --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
+ --chunker-hash-type string Choose how chunker handles hash sums (default \[dq]md5\[dq])
+ --chunker-remote string Remote to chunk/unchunk
+ --combine-upstreams SpaceSepList Upstreams for combining
+ --compress-level int GZIP compression level (-2 to 9) (default -1)
+ --compress-mode string Compression mode (default \[dq]gzip\[dq])
+ --compress-ram-cache-limit SizeSuffix Some remotes don\[aq]t allow the upload of files with unknown size (default 20Mi)
+ --compress-remote string Remote to compress
+ -L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
+ --crypt-filename-encoding string How to encode the encrypted filename to text string (default \[dq]base32\[dq])
+ --crypt-filename-encryption string How to encrypt the filenames (default \[dq]standard\[dq])
+ --crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
+ --crypt-pass-bad-blocks If set this will pass bad blocks through as all 0
+ --crypt-password string Password or pass phrase for encryption (obscured)
+ --crypt-password2 string Password or pass phrase for salt (obscured)
+ --crypt-remote string Remote to encrypt/decrypt
+ --crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-suffix string If this is set it will override the default suffix of \[dq].bin\[dq] (default \[dq].bin\[dq])
+ --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+ --drive-allow-import-name-change Allow the filetype to change when uploading Google docs
+ --drive-auth-owner-only Only consider files owned by the authenticated user
+ --drive-auth-url string Auth server URL
+ --drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
+ --drive-client-id string Google Application Client Id
+ --drive-client-secret string OAuth Client Secret
+ --drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-disable-http2 Disable drive using http2 (default true)
+ --drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
+ --drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
+ --drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default \[dq]docx,xlsx,pptx,svg\[dq])
+ --drive-formats string Deprecated: See export_formats
+ --drive-impersonate string Impersonate this user when using a service account
+ --drive-import-formats string Comma separated list of preferred formats for uploading Google docs
+ --drive-keep-revision-forever Keep new head revision of each file forever
+ --drive-list-chunk int Size of listing chunk 100-1000, 0 to disable (default 1000)
+ --drive-pacer-burst int Number of API calls to allow without sleeping (default 100)
+ --drive-pacer-min-sleep Duration Minimum time to sleep between API calls (default 100ms)
+ --drive-resource-key string Resource key for accessing a link-shared file
+ --drive-root-folder-id string ID of the root folder
+ --drive-scope string Scope that rclone should use when requesting access from drive
+ --drive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --drive-service-account-credentials string Service Account Credentials JSON blob
+ --drive-service-account-file string Service Account Credentials JSON file path
+ --drive-shared-with-me Only show files that are shared with me
+ --drive-size-as-quota Show sizes as storage quota usage, not actual size
+ --drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
+ --drive-skip-dangling-shortcuts If set skip dangling shortcut files
+ --drive-skip-gdocs Skip google documents in all listings
+ --drive-skip-shortcuts If set skip shortcut files
+ --drive-starred-only Only show files that are starred
+ --drive-stop-on-download-limit Make download limit errors be fatal
+ --drive-stop-on-upload-limit Make upload limit errors be fatal
+ --drive-team-drive string ID of the Shared Drive (Team Drive)
+ --drive-token string OAuth Access Token as a JSON blob
+ --drive-token-url string Token server url
+ --drive-trashed-only Only show files that are in the trash
+ --drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
+ --drive-use-created-date Use file created date instead of modified date
+ --drive-use-shared-date Use date file was shared instead of modified date
+ --drive-use-trash Send files to the trash instead of deleting permanently (default true)
+ --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download (default off)
+ --dropbox-auth-url string Auth server URL
+ --dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
+ --dropbox-batch-mode string Upload file batching sync|async|off (default \[dq]sync\[dq])
+ --dropbox-batch-size int Max number of files in upload batch
+ --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
+ --dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
+ --dropbox-client-id string OAuth Client Id
+ --dropbox-client-secret string OAuth Client Secret
+ --dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+ --dropbox-impersonate string Impersonate this user when using a business account
+ --dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --dropbox-shared-files Instructs rclone to work on individual shared files
+ --dropbox-shared-folders Instructs rclone to work on shared folders
+ --dropbox-token string OAuth Access Token as a JSON blob
+ --dropbox-token-url string Token server url
+ --fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
+ --fichier-cdn Set if you wish to use CDN download links
+ --fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+ --fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
+ --fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
+ --fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --filefabric-permanent-token string Permanent Authentication Token
+ --filefabric-root-folder-id string ID of the root folder
+ --filefabric-token string Session Token
+ --filefabric-token-expiry string Token expiry time
+ --filefabric-url string URL of the Enterprise File Fabric to connect to
+ --filefabric-version string Version read from the file fabric
+ --ftp-ask-password Allow asking for FTP password when needed
+ --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
+ --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-disable-epsv Disable using EPSV even if server advertises support
+ --ftp-disable-mlsd Disable using MLSD even if server advertises support
+ --ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
+ --ftp-disable-utf8 Disable using UTF-8 even if server advertises support
+ --ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
+ --ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
+ --ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
+ --ftp-host string FTP host to connect to
+ --ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --ftp-no-check-certificate Do not verify the TLS certificate of the server
+ --ftp-pass string FTP password (obscured)
+ --ftp-port int FTP port number (default 21)
+ --ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
+ --ftp-tls Use Implicit FTPS (FTP over TLS)
+ --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
+ --ftp-user string FTP username (default \[dq]$USER\[dq])
+ --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
+ --gcs-anonymous Access public buckets and objects without credentials
+ --gcs-auth-url string Auth server URL
+ --gcs-bucket-acl string Access Control List for new buckets
+ --gcs-bucket-policy-only Access checks should use bucket-level IAM policies
+ --gcs-client-id string OAuth Client Id
+ --gcs-client-secret string OAuth Client Secret
+ --gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gcs-endpoint string Endpoint for the service
+ --gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance metadata if no env vars)
+ --gcs-location string Location for the newly created buckets
+ --gcs-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
+ --gcs-object-acl string Access Control List for new objects
+ --gcs-project-number string Project number
+ --gcs-service-account-file string Service Account Credentials JSON file path
+ --gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage
+ --gcs-token string OAuth Access Token as a JSON blob
+ --gcs-token-url string Token server url
+ --gcs-user-project string User project
+ --gphotos-auth-url string Auth server URL
+ --gphotos-client-id string OAuth Client Id
+ --gphotos-client-secret string OAuth Client Secret
+ --gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+ --gphotos-include-archived Also view and download archived media
+ --gphotos-read-only Set to make the Google Photos backend read only
+ --gphotos-read-size Set to read the size of media items
+ --gphotos-start-year int Limit photos downloaded to those uploaded after the given year (default 2000)
+ --gphotos-token string OAuth Access Token as a JSON blob
+ --gphotos-token-url string Token server url
+ --hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
+ --hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+ --hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
+ --hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+ --hdfs-namenode string Hadoop name node and port
+ --hdfs-service-principal-name string Kerberos service principal name for the namenode
+ --hdfs-username string Hadoop user name
+ --hidrive-auth-url string Auth server URL
+ --hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
+ --hidrive-client-id string OAuth Client Id
+ --hidrive-client-secret string OAuth Client Secret
+ --hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
+ --hidrive-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --hidrive-endpoint string Endpoint for the service (default \[dq]https://api.hidrive.strato.com/2.1\[dq])
+ --hidrive-root-prefix string The root/parent folder for all paths (default \[dq]/\[dq])
+ --hidrive-scope-access string Access permissions that rclone should use when requesting access from HiDrive (default \[dq]rw\[dq])
+ --hidrive-scope-role string User-level that rclone should use when requesting access from HiDrive (default \[dq]user\[dq])
+ --hidrive-token string OAuth Access Token as a JSON blob
+ --hidrive-token-url string Token server url
+ --hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
+ --hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-headers CommaSepList Set HTTP headers for all transactions
+ --http-no-head Don\[aq]t use HEAD requests
+ --http-no-slash Set this if the site doesn\[aq]t end directories with /
+ --http-url string URL of HTTP host to connect to
+ --internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-disable-checksum Don\[aq]t ask the server to test against MD5 checksum calculated by rclone (default true)
+ --internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
+ --internetarchive-endpoint string IAS3 Endpoint (default \[dq]https://s3.us.archive.org\[dq])
+ --internetarchive-front-endpoint string Host of InternetArchive Frontend (default \[dq]https://archive.org\[dq])
+ --internetarchive-secret-access-key string IAS3 Secret Key (password)
+ --internetarchive-wait-archive Duration Timeout for the server\[aq]s processing tasks (specifically archive and book_op) to finish (default 0s)
+ --jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+ --jottacloud-hard-delete Delete files permanently rather than putting them into the trash
+ --jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+ --jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
+ --jottacloud-trashed-only Only show files that are in the trash
+ --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --koofr-endpoint string The Koofr API endpoint to use
+ --koofr-mountid string Mount ID of the mount to use
+ --koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
+ --koofr-provider string Choose your storage provider
+ --koofr-setmtime Whether the backend supports setting modification time (default true)
+ --koofr-user string Your user name
+ -l, --links Translate symlinks to/from regular files with a \[aq].rclonelink\[aq] extension
+ --local-case-insensitive Force the filesystem to report itself as case insensitive
+ --local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
+ --local-no-check-updated Don\[aq]t check to see if the files change during upload
+ --local-no-preallocate Disable preallocation of disk space for transferred files
+ --local-no-set-modtime Disable setting modtime
+ --local-no-sparse Disable sparse files for multi-thread downloads
+ --local-nounc Disable UNC (long path names) conversion on Windows
+ --local-unicode-normalization Apply unicode NFC normalization to paths and filenames
+ --local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
+ --mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
+ --mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --mailru-pass string Password (obscured)
+ --mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
+ --mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default \[dq]*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf\[dq])
+ --mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
+ --mailru-speedup-max-memory SizeSuffix Files larger than this size will always be hashed on disk (default 32Mi)
+ --mailru-user string User name (usually email)
+ --mega-debug Output more debug from Mega
+ --mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --mega-hard-delete Delete files permanently rather than putting them into the trash
+ --mega-pass string Password (obscured)
+ --mega-use-https Use HTTPS for transfers
+ --mega-user string User name
+ --netstorage-account string Set the NetStorage account name
+ --netstorage-host string Domain+path of NetStorage host to connect to
+ --netstorage-protocol string Select between HTTP or HTTPS protocol (default \[dq]https\[dq])
+ --netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
+ -x, --one-file-system Don\[aq]t cross filesystem boundaries (unix/macOS only)
+ --onedrive-access-scopes SpaceSepList Set scopes to be requested by rclone (default Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access)
+ --onedrive-auth-url string Auth server URL
+ --onedrive-av-override Allows download of files the server thinks have a virus
+ --onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
+ --onedrive-client-id string OAuth Client Id
+ --onedrive-client-secret string OAuth Client Secret
+ --onedrive-drive-id string The ID of the drive to use
+ --onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
+ --onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
+ --onedrive-hash-type string Specify the hash in use for the backend (default \[dq]auto\[dq])
+ --onedrive-link-password string Set the password for links created by the link command
+ --onedrive-link-scope string Set the scope of the links created by the link command (default \[dq]anonymous\[dq])
+ --onedrive-link-type string Set the type of the links created by the link command (default \[dq]view\[dq])
+ --onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-no-versions Remove all versions on modifying operations
+ --onedrive-region string Choose national cloud region for OneDrive (default \[dq]global\[dq])
+ --onedrive-root-folder-id string ID of the root folder
+ --onedrive-server-side-across-configs Deprecated: use --server-side-across-configs instead
+ --onedrive-token string OAuth Access Token as a JSON blob
+ --onedrive-token-url string Token server url
+ --oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --oos-compartment string Object storage compartment OCID
+ --oos-config-file string Path to OCI config file (default \[dq]\[ti]/.oci/config\[dq])
+ --oos-config-profile string Profile name inside the oci config file (default \[dq]Default\[dq])
+ --oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-disable-checksum Don\[aq]t store MD5 checksum with object metadata
+ --oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --oos-endpoint string Endpoint for Object storage API
+ --oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --oos-namespace string Object storage namespace
+ --oos-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
+ --oos-provider string Choose your Auth Provider (default \[dq]env_auth\[dq])
+ --oos-region string Object storage Region
+ --oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies \[dq]AES256\[dq] as the encryption algorithm
+ --oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
+ --oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
+ --oos-sse-customer-key-sha256 string If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
+ --oos-sse-kms-key-id string If using your own master key in vault, this header specifies the
+ --oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default \[dq]Standard\[dq])
+ --oos-upload-concurrency int Concurrency for multipart uploads (default 10)
+ --oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+ --opendrive-password string Password (obscured)
+ --opendrive-username string Username
+ --pcloud-auth-url string Auth server URL
+ --pcloud-client-id string OAuth Client Id
+ --pcloud-client-secret string OAuth Client Secret
+ --pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --pcloud-hostname string Hostname to connect to (default \[dq]api.pcloud.com\[dq])
+ --pcloud-password string Your pcloud password (obscured)
+ --pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default \[dq]d0\[dq])
+ --pcloud-token string OAuth Access Token as a JSON blob
+ --pcloud-token-url string Token server url
+ --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
+ --pikpak-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
+ --pikpak-pass string Pikpak password (obscured)
+ --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
+ --pikpak-trashed-only Only show files that are in the trash
+ --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
+ --pikpak-user string Pikpak username
+ --premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+ --qingstor-access-key-id string QingStor Access Key ID
+ --qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
+ --qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
+ --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
+ --qingstor-env-auth Get QingStor credentials from runtime
+ --qingstor-secret-access-key string QingStor Secret Access Key (password)
+ --qingstor-upload-concurrency int Concurrency for multipart uploads (default 1)
+ --qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --qingstor-zone string Zone to connect to
+ --s3-access-key-id string AWS Access Key ID
+ --s3-acl string Canned ACL used when creating buckets and storing or copying objects
+ --s3-bucket-acl string Canned ACL used when creating buckets
+ --s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
+ --s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
+ --s3-decompress If set this will decompress gzip encoded objects
+ --s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
+ --s3-disable-checksum Don\[aq]t store MD5 checksum with object metadata
+ --s3-disable-http2 Disable usage of http2 for S3 backends
+ --s3-download-url string Custom endpoint for downloads
+ --s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
+ --s3-endpoint string Endpoint for S3 API
+ --s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS metadata if no env vars)
+ --s3-force-path-style If true use path style access, if false use virtual hosted style (default true)
+ --s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+ --s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+ --s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
+ --s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
+ --s3-location-constraint string Location constraint - must be set to match the Region
+ --s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
+ --s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
+ --s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
+ --s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
+ --s3-no-check-bucket If set, don\[aq]t attempt to check the bucket exists or create it
+ --s3-no-head If set, don\[aq]t HEAD uploaded objects to check integrity
+ --s3-no-head-object If set, do not do HEAD before GET when getting objects
+ --s3-no-system-metadata Suppress setting and reading of system metadata
+ --s3-profile string Profile to use in the shared credentials file
+ --s3-provider string Choose your S3 provider
+ --s3-region string Region to connect to
+ --s3-requester-pays Enables requester pays option when interacting with S3 bucket
+ --s3-secret-access-key string AWS Secret Access Key (password)
+ --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
+ --s3-session-token string An AWS session token
+ --s3-shared-credentials-file string Path to the shared credentials file
+ --s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+ --s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
+ --s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
+ --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+ --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
+ --s3-storage-class string The storage class to use when storing new objects in S3
+ --s3-sts-endpoint string Endpoint for STS
+ --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
+ --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
+ --s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
+ --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
+ --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
+ --s3-v2-auth If true use v2 authentication
+ --s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-versions Include old versions in directory listings
+ --seafile-2fa Two-factor authentication (\[aq]true\[aq] if the account has 2FA enabled)
+ --seafile-create-library Should rclone create a library if it doesn\[aq]t exist
+ --seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+ --seafile-library string Name of the library
+ --seafile-library-key string Library password (for encrypted libraries only) (obscured)
+ --seafile-pass string Password (obscured)
+ --seafile-url string URL of seafile host to connect to
+ --seafile-user string User name (usually email address)
+ --sftp-ask-password Allow asking for SFTP password when needed
+ --sftp-chunk-size SizeSuffix Upload and download chunk size (default 32Ki)
+ --sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
+ --sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
+ --sftp-disable-concurrent-reads If set don\[aq]t use concurrent reads
+ --sftp-disable-concurrent-writes If set don\[aq]t use concurrent writes
+ --sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
+ --sftp-host string SSH host to connect to
+ --sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
+ --sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
+ --sftp-key-file string Path to PEM-encoded private key file
+ --sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file (obscured)
+ --sftp-key-pem string Raw PEM-encoded private key
+ --sftp-key-use-agent When set forces the usage of the ssh-agent
+ --sftp-known-hosts-file string Optional path to known_hosts file
+ --sftp-macs SpaceSepList Space separated list of MACs (message authentication code) algorithms, ordered by preference
+ --sftp-md5sum-command string The command used to read md5 hashes
+ --sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
+ --sftp-path-override string Override path used by SSH shell commands
+ --sftp-port int SSH port number (default 22)
+ --sftp-pubkey-file string Optional path to public key file
+ --sftp-server-command string Specifies the path or command to run an sftp server on the remote host
+ --sftp-set-env SpaceSepList Environment variables to pass to sftp and commands
+ --sftp-set-modtime Set the modified time on the remote if set (default true)
+ --sftp-sha1sum-command string The command used to read sha1 hashes
+ --sftp-shell-type string The type of SSH shell on remote server, if any
+ --sftp-skip-links Set to skip any symlinks and any other non-regular files
+ --sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default \[dq]sftp\[dq])
+ --sftp-use-fstat If set use fstat instead of stat
+ --sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
+ --sftp-user string SSH username (default \[dq]$USER\[dq])
+ --sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
+ --sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --sharefile-endpoint string Endpoint for API calls
+ --sharefile-root-folder-id string ID of the root folder
+ --sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
+ --sia-api-password string Sia Daemon API Password (obscured)
+ --sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default \[dq]http://127.0.0.1:9980\[dq])
+ --sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
+ --sia-user-agent string Siad User Agent (default \[dq]Sia-Agent\[dq])
+ --skip-links Don\[aq]t warn about skipped symlinks
+ --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-domain string Domain name for NTLM authentication (default \[dq]WORKGROUP\[dq])
+ --smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
+ --smb-hide-special-share Hide special shares (e.g. print$) which users aren\[aq]t supposed to access (default true)
+ --smb-host string SMB server hostname to connect to
+ --smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
+ --smb-pass string SMB password (obscured)
+ --smb-port int SMB port number (default 445)
+ --smb-spn string Service principal name
+ --smb-user string SMB username (default \[dq]$USER\[dq])
+ --storj-access-grant string Access grant
+ --storj-api-key string API key
+ --storj-passphrase string Encryption passphrase
+ --storj-provider string Choose an authentication method (default \[dq]existing\[dq])
+ --storj-satellite-address string Satellite address (default \[dq]us1.storj.io\[dq])
+ --sugarsync-access-key-id string Sugarsync Access Key ID
+ --sugarsync-app-id string Sugarsync App ID
+ --sugarsync-authorization string Sugarsync authorization
+ --sugarsync-authorization-expiry string Sugarsync authorization expiry
+ --sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
+ --sugarsync-hard-delete Permanently delete files if true
+ --sugarsync-private-access-key string Sugarsync Private Access Key
+ --sugarsync-refresh-token string Sugarsync refresh token
+ --sugarsync-root-id string Sugarsync root id
+ --sugarsync-user string Sugarsync user
+ --swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
+ --swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
+ --swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
+ --swift-auth string Authentication URL for server (OS_AUTH_URL)
+ --swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ --swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ --swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ --swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
+ --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default \[dq]public\[dq])
+ --swift-env-auth Get swift credentials from environment variables in standard OpenStack form
+ --swift-key string API key or password (OS_PASSWORD)
+ --swift-leave-parts-on-error If true avoid calling abort upload on a failure
+ --swift-no-chunk Don\[aq]t chunk files during streaming upload
+ --swift-no-large-objects Disable support for static and dynamic large objects
+ --swift-region string Region name - optional (OS_REGION_NAME)
+ --swift-storage-policy string The storage policy to use when creating a new container
+ --swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
+ --swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ --swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ --swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ --swift-user string User name to log in (OS_USERNAME)
+ --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+ --union-action-policy string Policy to choose upstream on ACTION category (default \[dq]epall\[dq])
+ --union-cache-time int Cache time of usage and free space (in seconds) (default 120)
+ --union-create-policy string Policy to choose upstream on CREATE category (default \[dq]epmfs\[dq])
+ --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
+ --union-search-policy string Policy to choose upstream on SEARCH category (default \[dq]ff\[dq])
+ --union-upstreams string List of space separated upstreams
+ --uptobox-access-token string Your access token
+ --uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+ --uptobox-private Set to make uploaded files private
+ --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
+ --webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-encoding string The encoding for the backend
+ --webdav-headers CommaSepList Set HTTP headers for all transactions
+ --webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
+ --webdav-pass string Password (obscured)
+ --webdav-url string URL of http host to connect to
+ --webdav-user string User name
+ --webdav-vendor string Name of the WebDAV site/service/software you are using
+ --yandex-auth-url string Auth server URL
+ --yandex-client-id string OAuth Client Id
+ --yandex-client-secret string OAuth Client Secret
+ --yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+ --yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-token string OAuth Access Token as a JSON blob
+ --yandex-token-url string Token server url
+ --zoho-auth-url string Auth server URL
+ --zoho-client-id string OAuth Client Id
+ --zoho-client-secret string OAuth Client Secret
+ --zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
+ --zoho-region string Zoho region to connect to
+ --zoho-token string OAuth Access Token as a JSON blob
+ --zoho-token-url string Token server url
\f[R]
.fi
.SH Docker Volume Plugin
@@ -22561,7 +22939,7 @@ root.
# NOTICE: If you make changes to this file you MUST do a --resync run.
# Run with --dry-run to see what changes will be made.
-# Dropbox wont sync some files so filter them away here.
+# Dropbox won\[aq]t sync some files so filter them away here.
# See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
- .dropbox.attr
- \[ti]*.tmp
@@ -23037,7 +23415,7 @@ and re-creating the parent would change its ID.
\f[C]delete-file \f[R] Delete a single file.
.IP \[bu] 2
\f[C]delete-glob \f[R] Delete a group of files located
-one level deep in the given directory with names maching a given glob
+one level deep in the given directory with names matching a given glob
pattern.
.IP \[bu] 2
\f[C]touch-glob YYYY-MM-DD \f[R] Change modification time
@@ -23468,6 +23846,19 @@ Env Var: RCLONE_FICHIER_FOLDER_PASSWORD
Type: string
.IP \[bu] 2
Required: false
+.SS --fichier-cdn
+.PP
+Set if you wish to use CDN download links.
+.PP
+Properties:
+.IP \[bu] 2
+Config: cdn
+.IP \[bu] 2
+Env Var: RCLONE_FICHIER_CDN
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
.SS --fichier-encoding
.PP
The encoding for the backend.
@@ -24048,6 +24439,8 @@ DigitalOcean Spaces
.IP \[bu] 2
Dreamhost
.IP \[bu] 2
+GCS
+.IP \[bu] 2
Huawei OBS
.IP \[bu] 2
IBM COS S3
@@ -24060,6 +24453,8 @@ Liara Object Storage
.IP \[bu] 2
Minio
.IP \[bu] 2
+Petabox
+.IP \[bu] 2
Qiniu Cloud Object Storage (Kodo)
.IP \[bu] 2
RackCorp Object Storage
@@ -24482,6 +24877,15 @@ server side encryption (SSE-AWS or SSE-C) the \f[C]ETag\f[R] header is
no longer the MD5 sum of the data, so rclone adds an additional piece of
metadata \f[C]X-Amz-Meta-Md5chksum\f[R] which is a base64 encoded MD5
hash (in the same format as is required for \f[C]Content-MD5\f[R]).
+You can use base64 -d and hexdump to check this value manually:
+.IP
+.nf
+\f[C]
+echo \[aq]VWTGdNx3LyXQDfA0e2Edxw==\[aq] | base64 -d | hexdump
+\f[R]
+.fi
+.PP
+or you can use \f[C]rclone check\f[R] to verify the hashes are OK.
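+.PP
+For example, assuming a remote named \f[C]s3:\f[R] (substitute your own
+remote and paths), a quick integrity check of a local tree against the
+bucket looks like this:
+.IP
+.nf
+\f[C]
+rclone check /path/to/local s3:bucket/path
+\f[R]
+.fi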
.PP
For large objects, calculating this hash can take some time so the
addition of this hash can be disabled with
@@ -24710,7 +25114,7 @@ Session Token: \f[C]AWS_SESSION_TOKEN\f[R] (optional)
.RE
.IP \[bu] 2
Or, use a named
-profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-multiple-profiles.html):
+profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html):
.RS 2
.IP \[bu] 2
Profile files are standard files used by AWS CLI tools
@@ -24846,10 +25250,11 @@ all the files to be uploaded as multipart.
.SS Standard options
.PP
Here are the Standard options specific to s3 (Amazon S3 Compliant
-Storage Providers including AWS, Alibaba, Ceph, China Mobile,
-Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
-IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp,
-Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China
+Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
+IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox,
+RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and
+Wasabi).
.SS --s3-provider
.PP
Choose your S3 provider.
@@ -24879,6 +25284,12 @@ Amazon Web Services (AWS) S3
Alibaba Cloud Object Storage System (OSS) formerly Aliyun
.RE
.IP \[bu] 2
+\[dq]ArvanCloud\[dq]
+.RS 2
+.IP \[bu] 2
+Arvan Cloud Object Storage (AOS)
+.RE
+.IP \[bu] 2
\[dq]Ceph\[dq]
.RS 2
.IP \[bu] 2
@@ -24897,12 +25308,6 @@ China Mobile Ecloud Elastic Object Storage (EOS)
Cloudflare R2 Storage
.RE
.IP \[bu] 2
-\[dq]ArvanCloud\[dq]
-.RS 2
-.IP \[bu] 2
-Arvan Cloud Object Storage (AOS)
-.RE
-.IP \[bu] 2
\[dq]DigitalOcean\[dq]
.RS 2
.IP \[bu] 2
@@ -24915,6 +25320,12 @@ DigitalOcean Spaces
Dreamhost DreamObjects
.RE
.IP \[bu] 2
+\[dq]GCS\[dq]
+.RS 2
+.IP \[bu] 2
+Google Cloud Storage
+.RE
+.IP \[bu] 2
\[dq]HuaweiOBS\[dq]
.RS 2
.IP \[bu] 2
@@ -24963,6 +25374,12 @@ Minio Object Storage
Netease Object Storage (NOS)
.RE
.IP \[bu] 2
+\[dq]Petabox\[dq]
+.RS 2
+.IP \[bu] 2
+Petabox Object Storage
+.RE
+.IP \[bu] 2
\[dq]RackCorp\[dq]
.RS 2
.IP \[bu] 2
@@ -25724,6 +26141,56 @@ Logrono, Spain
.RE
.SS --s3-region
.PP
+Region where your bucket will be created and your data stored.
+.PP
+Properties:
+.IP \[bu] 2
+Config: region
+.IP \[bu] 2
+Env Var: RCLONE_S3_REGION
+.IP \[bu] 2
+Provider: Petabox
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.IP \[bu] 2
+Examples:
+.RS 2
+.IP \[bu] 2
+\[dq]us-east-1\[dq]
+.RS 2
+.IP \[bu] 2
+US East (N.
+Virginia)
+.RE
+.IP \[bu] 2
+\[dq]eu-central-1\[dq]
+.RS 2
+.IP \[bu] 2
+Europe (Frankfurt)
+.RE
+.IP \[bu] 2
+\[dq]ap-southeast-1\[dq]
+.RS 2
+.IP \[bu] 2
+Asia Pacific (Singapore)
+.RE
+.IP \[bu] 2
+\[dq]me-south-1\[dq]
+.RS 2
+.IP \[bu] 2
+Middle East (Bahrain)
+.RE
+.IP \[bu] 2
+\[dq]sa-east-1\[dq]
+.RS 2
+.IP \[bu] 2
+South America (S\[~a]o Paulo)
+.RE
+.RE
+.SS --s3-region
+.PP
Region to connect to.
.PP
Leave blank if you are using an S3 clone and you don\[aq]t have a
@@ -25736,7 +26203,7 @@ Config: region
Env Var: RCLONE_S3_REGION
.IP \[bu] 2
Provider:
-!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
+!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
.IP \[bu] 2
Type: string
.IP \[bu] 2
@@ -25999,15 +26466,15 @@ Required: false
Examples:
.RS 2
.IP \[bu] 2
-\[dq]s3.ir-thr-at1.arvanstorage.com\[dq]
+\[dq]s3.ir-thr-at1.arvanstorage.ir\[dq]
.RS 2
.IP \[bu] 2
The default endpoint - a good choice if you are unsure.
.IP \[bu] 2
-Tehran Iran (Asiatech)
+Tehran Iran (Simin)
.RE
.IP \[bu] 2
-\[dq]s3.ir-tbz-sh1.arvanstorage.com\[dq]
+\[dq]s3.ir-tbz-sh1.arvanstorage.ir\[dq]
.RS 2
.IP \[bu] 2
Tabriz Iran (Shahriar)
@@ -26447,6 +26914,65 @@ Logrono, Spain
.RE
.SS --s3-endpoint
.PP
+Endpoint for Petabox S3 Object Storage.
+.PP
+Specify the endpoint from the same region.
+.PP
+Properties:
+.IP \[bu] 2
+Config: endpoint
+.IP \[bu] 2
+Env Var: RCLONE_S3_ENDPOINT
+.IP \[bu] 2
+Provider: Petabox
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: true
+.IP \[bu] 2
+Examples:
+.RS 2
+.IP \[bu] 2
+\[dq]s3.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+US East (N.
+Virginia)
+.RE
+.IP \[bu] 2
+\[dq]s3.us-east-1.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+US East (N.
+Virginia)
+.RE
+.IP \[bu] 2
+\[dq]s3.eu-central-1.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+Europe (Frankfurt)
+.RE
+.IP \[bu] 2
+\[dq]s3.ap-southeast-1.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+Asia Pacific (Singapore)
+.RE
+.IP \[bu] 2
+\[dq]s3.me-south-1.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+Middle East (Bahrain)
+.RE
+.IP \[bu] 2
+\[dq]s3.sa-east-1.petabox.io\[dq]
+.RS 2
+.IP \[bu] 2
+South America (S\[~a]o Paulo)
+.RE
+.RE
+.SS --s3-endpoint
+.PP
Endpoint for Liara Object Storage API.
.PP
Properties:
@@ -26826,6 +27352,31 @@ EU Endpoint
.RE
.SS --s3-endpoint
.PP
+Endpoint for Google Cloud Storage.
+.PP
+Properties:
+.IP \[bu] 2
+Config: endpoint
+.IP \[bu] 2
+Env Var: RCLONE_S3_ENDPOINT
+.IP \[bu] 2
+Provider: GCS
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.IP \[bu] 2
+Examples:
+.RS 2
+.IP \[bu] 2
+\[dq]https://storage.googleapis.com\[dq]
+.RS 2
+.IP \[bu] 2
+Google Cloud Storage endpoint
+.RE
+.RE
+.SS --s3-endpoint
+.PP
Endpoint for Storj Gateway.
.PP
Properties:
@@ -27189,7 +27740,7 @@ Config: endpoint
Env Var: RCLONE_S3_ENDPOINT
.IP \[bu] 2
Provider:
-!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
+!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
.IP \[bu] 2
Type: string
.IP \[bu] 2
@@ -27350,10 +27901,16 @@ Wasabi AP Southeast 2 (Sydney)
Liara Iran endpoint
.RE
.IP \[bu] 2
-\[dq]s3.ir-thr-at1.arvanstorage.com\[dq]
+\[dq]s3.ir-thr-at1.arvanstorage.ir\[dq]
.RS 2
.IP \[bu] 2
-ArvanCloud Tehran Iran (Asiatech) endpoint
+ArvanCloud Tehran Iran (Simin) endpoint
+.RE
+.IP \[bu] 2
+\[dq]s3.ir-tbz-sh1.arvanstorage.ir\[dq]
+.RS 2
+.IP \[bu] 2
+ArvanCloud Tabriz Iran (Shahriar) endpoint
.RE
.RE
.SS --s3-location-constraint
@@ -27752,7 +28309,7 @@ Examples:
\[dq]ir-thr-at1\[dq]
.RS 2
.IP \[bu] 2
-Tehran Iran (Asiatech)
+Tehran Iran (Simin)
.RE
.IP \[bu] 2
\[dq]ir-tbz-sh1\[dq]
@@ -28185,7 +28742,7 @@ Config: location_constraint
Env Var: RCLONE_S3_LOCATION_CONSTRAINT
.IP \[bu] 2
Provider:
-!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
+!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox
.IP \[bu] 2
Type: string
.IP \[bu] 2
@@ -28680,6 +29237,8 @@ Default.
The Standard class for any upload.
.IP \[bu] 2
Suitable for on-demand content like streaming or CDN.
+.IP \[bu] 2
+Available in all regions.
.RE
.IP \[bu] 2
\[dq]GLACIER\[dq]
@@ -28688,6 +29247,19 @@ Suitable for on-demand content like streaming or CDN.
Archived storage.
.IP \[bu] 2
Prices are lower, but it needs to be restored first to be accessed.
+.IP \[bu] 2
+Available in FR-PAR and NL-AMS regions.
+.RE
+.IP \[bu] 2
+\[dq]ONEZONE_IA\[dq]
+.RS 2
+.IP \[bu] 2
+One Zone - Infrequent Access.
+.IP \[bu] 2
+A good choice for storing secondary backup copies or easily re-creatable
+data.
+.IP \[bu] 2
+Available in the FR-PAR region only.
.RE
.RE
.SS --s3-storage-class
@@ -28736,10 +29308,11 @@ Deep archive storage mode
.SS Advanced options
.PP
Here are the Advanced options specific to s3 (Amazon S3 Compliant
-Storage Providers including AWS, Alibaba, Ceph, China Mobile,
-Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
-IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp,
-Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
+Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China
+Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS,
+IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox,
+RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and
+Wasabi).
.SS --s3-bucket-acl
.PP
Canned ACL used when creating buckets.
@@ -29434,6 +30007,23 @@ Env Var: RCLONE_S3_DOWNLOAD_URL
Type: string
.IP \[bu] 2
Required: false
+.SS --s3-directory-markers
+.PP
+Upload an empty object with a trailing slash when a new directory is
+created.
+.PP
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with \[dq]/\[dq] to persist the folder.
+.PP
+Properties:
+.IP \[bu] 2
+Config: directory_markers
+.IP \[bu] 2
+Env Var: RCLONE_S3_DIRECTORY_MARKERS
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
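+.PP
+For example, to create an empty directory together with its marker
+object (the remote and bucket names here are illustrative):
+.IP
+.nf
+\f[C]
+rclone mkdir --s3-directory-markers s3:bucket/empty-dir
+\f[R]
+.fi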
.SS --s3-use-multipart-etag
.PP
Whether to use ETag in multipart uploads for verification
@@ -29565,6 +30155,35 @@ Env Var: RCLONE_S3_MIGHT_GZIP
Type: Tristate
.IP \[bu] 2
Default: unset
+.SS --s3-use-accept-encoding-gzip
+.PP
+Whether to send \f[C]Accept-Encoding: gzip\f[R] header.
+.PP
+By default, rclone will append \f[C]Accept-Encoding: gzip\f[R] to the
+request to download compressed objects whenever possible.
+.PP
+However some providers such as Google Cloud Storage may alter the HTTP
+headers, breaking the signature of the request.
+.PP
+A symptom of this would be receiving errors like
+.IP
+.nf
+\f[C]
+SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
+\f[R]
+.fi
+.PP
+In this case, you might want to try disabling this option.
+.PP
+Properties:
+.IP \[bu] 2
+Config: use_accept_encoding_gzip
+.IP \[bu] 2
+Env Var: RCLONE_S3_USE_ACCEPT_ENCODING_GZIP
+.IP \[bu] 2
+Type: Tristate
+.IP \[bu] 2
+Default: unset
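+.PP
+For example, to disable it for a single transfer (the remote name and
+paths are illustrative):
+.IP
+.nf
+\f[C]
+rclone copy --s3-use-accept-encoding-gzip=false remote:bucket/path /tmp/download
+\f[R]
+.fi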
.SS --s3-no-system-metadata
.PP
Suppress setting and reading of system metadata
@@ -30176,6 +30795,27 @@ server_side_encryption =
storage_class =
\f[R]
.fi
+.SS Google Cloud Storage
+.PP
+Google Cloud Storage (https://cloud.google.com/storage/docs) is an
+S3-interoperable (https://cloud.google.com/storage/docs/interoperability)
+object storage service from Google Cloud Platform.
+.PP
+To connect to Google Cloud Storage you will need an access key and
+secret key.
+These can be retrieved by creating an HMAC
+key (https://cloud.google.com/storage/docs/authentication/managing-hmackeys).
+.IP
+.nf
+\f[C]
+[gs]
+type = s3
+provider = GCS
+access_key_id = your_access_key
+secret_access_key = your_secret_key
+endpoint = https://storage.googleapis.com
+\f[R]
+.fi
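+.PP
+With the remote above (named \f[C]gs\f[R] in the example config) you
+can check the setup by listing your buckets:
+.IP
+.nf
+\f[C]
+rclone lsd gs:
+\f[R]
+.fi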
.SS DigitalOcean Spaces
.PP
Spaces (https://www.digitalocean.com/products/object-storage/) is an
@@ -32397,6 +33037,172 @@ For Netease NOS configure as per the configurator
\f[C]rclone config\f[R] setting the provider \f[C]Netease\f[R].
This will automatically set \f[C]force_path_style = false\f[R] which is
necessary for it to run properly.
+.SS Petabox
+.PP
+Here is an example of making a Petabox (https://petabox.io/)
+configuration.
+First run:
+.IP
+.nf
+\f[C]
+rclone config
+\f[R]
+.fi
+.PP
+This will guide you through an interactive setup process.
+.IP
+.nf
+\f[C]
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+n/s> n
+
+Enter name for new remote.
+name> My Petabox Storage
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \[rs] \[dq]s3\[dq]
+[snip]
+Storage> s3
+
+Option provider.
+Choose your S3 provider.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+[snip]
+XX / Petabox Object Storage
+ \[rs] (Petabox)
+[snip]
+provider> Petabox
+
+Option env_auth.
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+Only applies if access_key_id and secret_access_key is blank.
+Choose a number from below, or type in your own boolean value (true or false).
+Press Enter for the default (false).
+ 1 / Enter AWS credentials in the next step.
+ \[rs] (false)
+ 2 / Get AWS credentials from the environment (env vars or IAM).
+ \[rs] (true)
+env_auth> 1
+
+Option access_key_id.
+AWS Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+access_key_id> YOUR_ACCESS_KEY_ID
+
+Option secret_access_key.
+AWS Secret Access Key (password).
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+secret_access_key> YOUR_SECRET_ACCESS_KEY
+
+Option region.
+Region where your bucket will be created and your data stored.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / US East (N. Virginia)
+ \[rs] (us-east-1)
+ 2 / Europe (Frankfurt)
+ \[rs] (eu-central-1)
+ 3 / Asia Pacific (Singapore)
+ \[rs] (ap-southeast-1)
+ 4 / Middle East (Bahrain)
+ \[rs] (me-south-1)
+ 5 / South America (S\[~a]o Paulo)
+ \[rs] (sa-east-1)
+region> 1
+
+Option endpoint.
+Endpoint for Petabox S3 Object Storage.
+Specify the endpoint from the same region.
+Choose a number from below, or type in your own value.
+ 1 / US East (N. Virginia)
+ \[rs] (s3.petabox.io)
+ 2 / US East (N. Virginia)
+ \[rs] (s3.us-east-1.petabox.io)
+ 3 / Europe (Frankfurt)
+ \[rs] (s3.eu-central-1.petabox.io)
+ 4 / Asia Pacific (Singapore)
+ \[rs] (s3.ap-southeast-1.petabox.io)
+ 5 / Middle East (Bahrain)
+ \[rs] (s3.me-south-1.petabox.io)
+ 6 / South America (S\[~a]o Paulo)
+ \[rs] (s3.sa-east-1.petabox.io)
+endpoint> 1
+
+Option acl.
+Canned ACL used when creating buckets and storing or copying objects.
+This ACL is used for creating objects and if bucket_acl isn\[aq]t set, for creating buckets too.
+For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
+Note that this ACL is applied when server-side copying objects as S3
+doesn\[aq]t copy the ACL from the source but rather writes a fresh one.
+If the acl is an empty string then no X-Amz-Acl: header is added and
+the default (private) will be used.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ / Owner gets FULL_CONTROL.
+ 1 | No one else has access rights (default).
+ \[rs] (private)
+ / Owner gets FULL_CONTROL.
+ 2 | The AllUsers group gets READ access.
+ \[rs] (public-read)
+ / Owner gets FULL_CONTROL.
+ 3 | The AllUsers group gets READ and WRITE access.
+ | Granting this on a bucket is generally not recommended.
+ \[rs] (public-read-write)
+ / Owner gets FULL_CONTROL.
+ 4 | The AuthenticatedUsers group gets READ access.
+ \[rs] (authenticated-read)
+ / Object owner gets FULL_CONTROL.
+ 5 | Bucket owner gets READ access.
+ | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \[rs] (bucket-owner-read)
+ / Both the object owner and the bucket owner get FULL_CONTROL over the object.
+ 6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+ \[rs] (bucket-owner-full-control)
+acl> 1
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> No
+
+Configuration complete.
+Options:
+- type: s3
+- provider: Petabox
+- access_key_id: YOUR_ACCESS_KEY_ID
+- secret_access_key: YOUR_SECRET_ACCESS_KEY
+- region: us-east-1
+- endpoint: s3.petabox.io
+Keep this \[dq]My Petabox Storage\[dq] remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+\f[R]
+.fi
+.PP
+This will leave the config file looking like this.
+.IP
+.nf
+\f[C]
+[My Petabox Storage]
+type = s3
+provider = Petabox
+access_key_id = YOUR_ACCESS_KEY_ID
+secret_access_key = YOUR_SECRET_ACCESS_KEY
+region = us-east-1
+endpoint = s3.petabox.io
+\f[R]
+.fi
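+.PP
+Once configured, the remote can be used like any other, for example
+(the bucket name is illustrative):
+.IP
+.nf
+\f[C]
+rclone mkdir \[dq]My Petabox Storage:my-bucket\[dq]
+rclone copy /path/to/files \[dq]My Petabox Storage:my-bucket\[dq]
+\f[R]
+.fi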
.SS Storj
.PP
Storj is a decentralized cloud storage which can be used through its
@@ -34845,7 +35651,7 @@ However, this can double the amount of small files in storage and incur
additional service charges.
You can even use chunker to force md5/sha1 support in any other remote
at expense of sidecar meta objects by setting e.g.
-\f[C]chunk_type=sha1all\f[R] to force hashsums and
+\f[C]hash_type=sha1all\f[R] to force hashsums and
\f[C]chunk_size=1P\f[R] to effectively disable chunking.
.PP
Normally, when a file is copied to chunker controlled remote, chunker
@@ -36024,8 +36830,8 @@ For cloud storage systems with case sensitive file names (e.g.
Google Drive), \f[C]base64\f[R] can be used to reduce file name length.
For cloud storage systems using UTF-16 to store file names internally
(e.g.
-OneDrive), \f[C]base32768\f[R] can be used to drastically reduce file
-name length.
+OneDrive, Dropbox), \f[C]base32768\f[R] can be used to drastically
+reduce file name length.
.PP
An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.
@@ -36054,8 +36860,8 @@ Hashes are not stored for crypt.
However the data integrity is protected by an extremely strong crypto
authenticator.
.PP
-Use the \f[C]rclone cryptcheck\f[R] command to check the integrity of a
-crypted remote instead of \f[C]rclone check\f[R] which can\[aq]t check
+Use the \f[C]rclone cryptcheck\f[R] command to check the integrity of an
+encrypted remote instead of \f[C]rclone check\f[R] which can\[aq]t check
the checksums properly.
.SS Standard options
.PP
@@ -36114,7 +36920,7 @@ Very simple filename obfuscation.
.IP \[bu] 2
Don\[aq]t encrypt the file names.
.IP \[bu] 2
-Adds a \[dq].bin\[dq] extension only.
+Adds a \[dq].bin\[dq] extension only (or the suffix set by
+\f[C]--crypt-suffix\f[R]).
.RE
.RE
.SS --crypt-directory-name-encryption
@@ -36190,6 +36996,8 @@ Here are the Advanced options specific to crypt (Encrypt/Decrypt a
remote).
.SS --crypt-server-side-across-configs
.PP
+Deprecated: use --server-side-across-configs instead.
+.PP
Allow server-side operations (e.g.
copy) to work across different crypt configs.
.PP
@@ -36261,6 +37069,23 @@ Don\[aq]t encrypt file data, leave it unencrypted.
Encrypt file data.
.RE
.RE
+.SS --crypt-pass-bad-blocks
+.PP
+If set this will pass bad blocks through as all 0.
+.PP
+This should not be set in normal operation.
+Set it only when trying to recover an encrypted file with errors and
+you want to recover as much of the file as possible.
+.PP
+Properties:
+.IP \[bu] 2
+Config: pass_bad_blocks
+.IP \[bu] 2
+Env Var: RCLONE_CRYPT_PASS_BAD_BLOCKS
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
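+.PP
+For example, a last resort recovery copy might look like this (the
+remote and file names are illustrative):
+.IP
+.nf
+\f[C]
+rclone copy --crypt-pass-bad-blocks secret:damaged-file /tmp/recovered
+\f[R]
+.fi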
.SS --crypt-filename-encoding
.PP
How to encode the encrypted filename to text string.
@@ -36304,9 +37129,25 @@ Suitable if your remote counts UTF-16 or
.IP \[bu] 2
Unicode codepoint instead of UTF-8 byte length.
(Eg.
-Onedrive)
+Onedrive, Dropbox)
.RE
.RE
+.SS --crypt-suffix
+.PP
+If this is set it will override the default suffix of \[dq].bin\[dq].
+.PP
+Setting suffix to \[dq]none\[dq] will result in an empty suffix.
+This may be useful when the path length is critical.
+.PP
+Properties:
+.IP \[bu] 2
+Config: suffix
+.IP \[bu] 2
+Env Var: RCLONE_CRYPT_SUFFIX
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Default: \[dq].bin\[dq]
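+.PP
+For example, a crypt remote without any suffix might be configured like
+this (a sketch - the other required crypt options are omitted):
+.IP
+.nf
+\f[C]
+[secret]
+type = crypt
+remote = remote:path
+suffix = none
+\f[R]
+.fi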
.SS Metadata
.PP
Any metadata supported by the underlying remote is read and written.
@@ -36374,10 +37215,10 @@ rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
\f[R]
.fi
-.SS Backing up a crypted remote
+.SS Backing up an encrypted remote
.PP
-If you wish to backup a crypted remote, it is recommended that you use
-\f[C]rclone sync\f[R] on the encrypted files, and make sure the
+If you wish to backup an encrypted remote, it is recommended that you
+use \f[C]rclone sync\f[R] on the encrypted files, and make sure the
passwords are the same in the new encrypted remote.
.PP
This will have the following advantages
@@ -37379,9 +38220,9 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
.IP \[bu] 2
-batch_mode: async - default batch_timeout is 500ms
+batch_mode: async - default batch_timeout is 10s
.IP \[bu] 2
-batch_mode: sync - default batch_timeout is 10s
+batch_mode: sync - default batch_timeout is 500ms
.IP \[bu] 2
batch_mode: off - not in use
.PP
@@ -37407,6 +38248,19 @@ Env Var: RCLONE_DROPBOX_BATCH_COMMIT_TIMEOUT
Type: Duration
.IP \[bu] 2
Default: 10m0s
+.SS --dropbox-pacer-min-sleep
+.PP
+Minimum time to sleep between API calls.
+.PP
+Properties:
+.IP \[bu] 2
+Config: pacer_min_sleep
+.IP \[bu] 2
+Env Var: RCLONE_DROPBOX_PACER_MIN_SLEEP
+.IP \[bu] 2
+Type: Duration
+.IP \[bu] 2
+Default: 10ms
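+.PP
+For example, to back off more between API calls if Dropbox is rate
+limiting you (the value and paths are illustrative):
+.IP
+.nf
+\f[C]
+rclone sync --dropbox-pacer-min-sleep 100ms /path/to/files dropbox:backup
+\f[R]
+.fi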
.SS --dropbox-encoding
.PP
The encoding for the backend.
@@ -38828,6 +39682,21 @@ Env Var: RCLONE_GCS_PROJECT_NUMBER
Type: string
.IP \[bu] 2
Required: false
+.SS --gcs-user-project
+.PP
+User project.
+.PP
+Optional - needed only for requester pays.
+.PP
+Properties:
+.IP \[bu] 2
+Config: user_project
+.IP \[bu] 2
+Env Var: RCLONE_GCS_USER_PROJECT
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
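+.PP
+For example, to read from a requester pays bucket while billing the
+project \f[C]myproject\f[R] (the project, bucket and remote names are
+illustrative):
+.IP
+.nf
+\f[C]
+rclone ls --gcs-user-project myproject gcs:requester-pays-bucket
+\f[R]
+.fi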
.SS --gcs-service-account-file
.PP
Service Account Credentials JSON file path.
@@ -39397,6 +40266,23 @@ Env Var: RCLONE_GCS_TOKEN_URL
Type: string
.IP \[bu] 2
Required: false
+.SS --gcs-directory-markers
+.PP
+Upload an empty object with a trailing slash when a new directory is
+created.
+.PP
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with \[dq]/\[dq] to persist the folder.
+.PP
+Properties:
+.IP \[bu] 2
+Config: directory_markers
+.IP \[bu] 2
+Env Var: RCLONE_GCS_DIRECTORY_MARKERS
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
.SS --gcs-no-check-bucket
.PP
If set, don\[aq]t attempt to check the bucket exists or create it.
@@ -39718,18 +40604,12 @@ You must have a project - create one if you don\[aq]t.
.IP \[bu] 2
Then go to \[dq]IAM & admin\[dq] -> \[dq]Service Accounts\[dq].
.IP \[bu] 2
-Use the \[dq]Create Credentials\[dq] button.
-Fill in \[dq]Service account name\[dq] with something that identifies
-your client.
-\[dq]Role\[dq] can be empty.
+Use the \[dq]Create Service Account\[dq] button.
+Fill in \[dq]Service account name\[dq] and \[dq]Service account ID\[dq]
+with something that identifies your client.
.IP \[bu] 2
-Tick \[dq]Furnish a new private key\[dq] - select \[dq]Key type
-JSON\[dq].
-.IP \[bu] 2
-Tick \[dq]Enable G Suite Domain-wide Delegation\[dq].
-This option makes \[dq]impersonation\[dq] possible, as documented here:
-Delegating domain-wide authority to the service
-account (https://developers.google.com/identity/protocols/OAuth2ServiceAccount#delegatingauthority)
+Select \[dq]Create And Continue\[dq].
+Steps 2 and 3 are optional.
.IP \[bu] 2
These credentials are what rclone will use for authentication.
If you ever need to remove access, press the \[dq]Delete service account
@@ -39936,7 +40816,7 @@ like a symlink in unix, except they point to the underlying file data
the inode in unix terms) so they don\[aq]t break if the source is
renamed or moved about.
.PP
-Be default rclone treats these as follows.
+By default rclone treats these as follows.
.PP
For shortcuts pointing to files:
.IP \[bu] 2
@@ -41006,6 +41886,8 @@ Type: int
Default: 100
.SS --drive-server-side-across-configs
.PP
+Deprecated: use --server-side-across-configs instead.
+.PP
Allow server-side operations (e.g.
copy) to work across different drive configs.
.PP
@@ -41175,6 +42057,39 @@ Env Var: RCLONE_DRIVE_ENCODING
Type: MultiEncoder
.IP \[bu] 2
Default: InvalidUtf8
+.SS --drive-env-auth
+.PP
+Get IAM credentials from runtime (environment variables or instance
+metadata if no env vars).
+.PP
+Only applies if service_account_file and service_account_credentials is
+blank.
+.PP
+Properties:
+.IP \[bu] 2
+Config: env_auth
+.IP \[bu] 2
+Env Var: RCLONE_DRIVE_ENV_AUTH
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
+.IP \[bu] 2
+Examples:
+.RS 2
+.IP \[bu] 2
+\[dq]false\[dq]
+.RS 2
+.IP \[bu] 2
+Enter credentials in the next step.
+.RE
+.IP \[bu] 2
+\[dq]true\[dq]
+.RS 2
+.IP \[bu] 2
+Get GCP IAM credentials from the environment (env vars or IAM).
+.RE
+.RE
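+.PP
+For example, when running on a GCE instance with a suitable service
+account attached, listing a drive should work without any token setup
+(a sketch using an on the fly remote):
+.IP
+.nf
+\f[C]
+rclone lsd --drive-env-auth --drive-scope drive.readonly :drive:
+\f[R]
+.fi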
.SS Backend commands
.PP
Here are the commands specific to the drive backend.
@@ -42550,7 +43465,7 @@ Databases are maintained one per \f[I]base\f[R] backend, named like
Checksums for multiple \f[C]alias\f[R]-es into a single base backend
will be stored in the single database.
All local paths are treated as aliases into the \f[C]local\f[R] backend
-(unless crypted or chunked) and stored in
+(unless encrypted or chunked) and stored in
\f[C]\[ti]/.cache/rclone/kv/local\[ti]hasher.bolt\f[R].
Databases can be shared between multiple rclone processes.
.SH HDFS
@@ -42811,7 +43726,7 @@ Required: false
Kerberos data transfer protection: authentication|integrity|privacy.
.PP
Specifies whether or not authentication, data signature integrity
-checks, and wire encryption is required when communicating the the
+checks, and wire encryption are required when communicating with the
datanodes.
Possible values are \[aq]authentication\[aq], \[aq]integrity\[aq] and
\[aq]privacy\[aq].
@@ -44461,7 +45376,7 @@ Small files will be cached in memory - see the
--jottacloud-md5-memory-limit flag.
When uploading from local disk the source checksum is always available,
so this does not apply.
-Starting with rclone version 1.52 the same is true for crypted remotes
+Starting with rclone version 1.52 the same is true for encrypted remotes
(in older versions the crypt backend would not calculate hashes for
uploads from local disk, so the Jottacloud backend had to do it as
described above).
@@ -46027,7 +46942,7 @@ MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become
very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
-HTTPS is normally not necesary since all data is already encrypted
+HTTPS is normally not necessary since all data is already encrypted
anyway.
Enabling it will increase CPU usage and add network overhead.
.PP
@@ -46794,6 +47709,21 @@ authenticate to
.IP \[bu] 2
\f[C]AZURE_PASSWORD\f[R]: the user\[aq]s password
.RE
+.IP "4." 3
+Workload Identity
+.RS 4
+.IP \[bu] 2
+\f[C]AZURE_TENANT_ID\f[R]: Tenant to authenticate in.
+.IP \[bu] 2
+\f[C]AZURE_CLIENT_ID\f[R]: Client ID of the application the user will
+authenticate to.
+.IP \[bu] 2
+\f[C]AZURE_FEDERATED_TOKEN_FILE\f[R]: Path to projected service account
+token file.
+.IP \[bu] 2
+\f[C]AZURE_AUTHORITY_HOST\f[R]: Authority of an Azure Active Directory
+endpoint (default: login.microsoftonline.com).
+.RE
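+.PP
+A minimal sketch of using Workload Identity, assuming illustrative IDs
+and the token file path commonly projected into Kubernetes pods:
+.IP
+.nf
+\f[C]
+export AZURE_TENANT_ID=11111111-2222-3333-4444-555555555555
+export AZURE_CLIENT_ID=66666666-7777-8888-9999-000000000000
+export AZURE_FEDERATED_TOKEN_FILE=/var/run/secrets/azure/tokens/azure-identity-token
+rclone lsd --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
+\f[R]
+.fi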
.SS Env Auth: 2. Managed Service Identity Credentials
.PP
When using Managed Service Identity if the VM(SS) on which this program
@@ -46829,7 +47759,7 @@ Or
.IP
.nf
\f[C]
-rclone lsf --azureblob-env-auth --azureblob-acccount=ACCOUNT :azureblob:CONTAINER
+rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
\f[R]
.fi
.PP
@@ -47556,6 +48486,26 @@ Blob data within this container can be read via anonymous request.
Allow full public read access for container and blob data.
.RE
.RE
+.SS --azureblob-directory-markers
+.PP
+Upload an empty object with a trailing slash when a new directory is
+created.
+.PP
+Empty folders are unsupported for bucket-based remotes; this option
+creates an empty object ending with \[dq]/\[dq] to persist the folder.
+.PP
+This object also has the metadata \[dq]hdi_isfolder = true\[dq] to
+conform to the Microsoft standard.
+.PP
+Properties:
+.IP \[bu] 2
+Config: directory_markers
+.IP \[bu] 2
+Env Var: RCLONE_AZUREBLOB_DIRECTORY_MARKERS
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
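+.PP
+For example, a config file entry with this option enabled might look
+like the following sketch (the remote name and account are
+illustrative):
+.IP
+.nf
+\f[C]
+[azblob]
+type = azureblob
+env_auth = true
+account = ACCOUNT
+directory_markers = true
+\f[R]
+.fi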
.SS --azureblob-no-check-container
.PP
If set, don\[aq]t attempt to check the container exists or create it.
@@ -48297,6 +49247,8 @@ Type: bool
Default: false
.SS --onedrive-server-side-across-configs
.PP
+Deprecated: use --server-side-across-configs instead.
+.PP
Allow server-side operations (e.g.
copy) to work across different onedrive configs.
.PP
@@ -48444,7 +49396,7 @@ Required: false
Specify the hash in use for the backend.
.PP
This specifies the hash type in use.
-If set to \[dq]auto\[dq] it will use the default hash which is is
+If set to \[dq]auto\[dq] it will use the default hash which is
QuickXorHash.
.PP
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
@@ -48510,6 +49462,35 @@ CRC32
None - don\[aq]t use any hashes
.RE
.RE
+.SS --onedrive-av-override
+.PP
+Allows download of files the server thinks has a virus.
+.PP
+The onedrive/sharepoint server may check files uploaded with an Anti
+Virus checker.
+If it detects any potential viruses or malware it will block download of
+the file.
+.PP
+In this case you will see a message like this
+.IP
+.nf
+\f[C]
+server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
+\f[R]
+.fi
+.PP
+If you are 100% sure you want to download this file anyway then use the
+--onedrive-av-override flag, or av_override = true in the config file.
+.PP
+Properties:
+.IP \[bu] 2
+Config: av_override
+.IP \[bu] 2
+Env Var: RCLONE_ONEDRIVE_AV_OVERRIDE
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
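+.PP
+For example, to download a flagged file anyway (the remote and file
+names are illustrative):
+.IP
+.nf
+\f[C]
+rclone copy --onedrive-av-override onedrive:path/to/flagged-file.bin /tmp
+\f[R]
+.fi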
.SS --onedrive-encoding
.PP
The encoding for the backend.
@@ -49963,8 +50944,7 @@ None
.RE
.SS --oos-sse-kms-key-id
.PP
-if using using your own master key in vault, this header specifies the
-OCID
+if using your own master key in vault, this header specifies the OCID
(https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)
of a master encryption key used to call the Key Management service to
generate a data encryption key or to encrypt or decrypt a data
@@ -50789,6 +51769,8 @@ Storage (https://www.ovh.co.uk/public-cloud/storage/object-storage/)
Oracle Cloud
Storage (https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
.IP \[bu] 2
+Blomp Cloud Storage (https://www.blomp.com/cloud-storage/)
+.IP \[bu] 2
IBM Bluemix Cloud ObjectStorage
Swift (https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
.PP
@@ -50819,7 +51801,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
-XX / OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
\[rs] \[dq]swift\[dq]
[snip]
Storage> swift
@@ -50848,6 +51830,8 @@ Choose a number from below, or type in your own value
\[rs] \[dq]https://auth.storage.memset.com/v2.0\[dq]
6 / OVH
\[rs] \[dq]https://auth.cloud.ovh.net/v3\[dq]
+ 7 / Blomp Cloud Storage
+ \[rs] \[dq]https://authenticate.ain.net\[dq]
auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
user_id>
@@ -51072,7 +52056,7 @@ be used in JSON strings.
.SS Standard options
.PP
Here are the Standard options specific to swift (OpenStack Swift
-(Rackspace Cloud Files, Memset Memstore, OVH)).
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
.SS --swift-env-auth
.PP
Get swift credentials from environment variables in standard OpenStack
@@ -51183,6 +52167,12 @@ Memset Memstore UK v2
.IP \[bu] 2
OVH
.RE
+.IP \[bu] 2
+\[dq]https://authenticate.ain.net\[dq]
+.RS 2
+.IP \[bu] 2
+Blomp Cloud Storage
+.RE
.RE
.SS --swift-user-id
.PP
@@ -51422,7 +52412,7 @@ OVH Public Cloud Archive
.SS Advanced options
.PP
Here are the Advanced options specific to swift (OpenStack Swift
-(Rackspace Cloud Files, Memset Memstore, OVH)).
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
.SS --swift-leave-parts-on-error
.PP
If true avoid calling abort upload on a failure.
@@ -51923,6 +52913,357 @@ Env Var: RCLONE_PCLOUD_PASSWORD
Type: string
.IP \[bu] 2
Required: false
+.SH PikPak
+.PP
+PikPak is a private cloud drive (https://mypikpak.com/).
+.PP
+Paths are specified as \f[C]remote:path\f[R], and may be as deep as
+required, e.g.
+\f[C]remote:directory/subdirectory\f[R].
+.SS Configuration
+.PP
+Here is an example of making a remote for PikPak.
+.PP
+First run:
+.IP
+.nf
+\f[C]
+ rclone config
+\f[R]
+.fi
+.PP
+This will guide you through an interactive setup process:
+.IP
+.nf
+\f[C]
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> remote
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+XX / PikPak
+ \[rs] (pikpak)
+Storage> XX
+
+Option user.
+Pikpak username.
+Enter a value.
+user> USERNAME
+
+Option pass.
+Pikpak password.
+Choose an alternative below.
+y) Yes, type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n>
+
+Configuration complete.
+Options:
+- type: pikpak
+- user: USERNAME
+- pass: *** ENCRYPTED ***
+- token: {\[dq]access_token\[dq]:\[dq]eyJ...\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]os...\[dq],\[dq]expiry\[dq]:\[dq]2023-01-26T18:54:32.170582647+09:00\[dq]}
+Keep this \[dq]remote\[dq] remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+\f[R]
+.fi
+.SS Standard options
+.PP
+Here are the Standard options specific to pikpak (PikPak).
+.SS --pikpak-user
+.PP
+Pikpak username.
+.PP
+Properties:
+.IP \[bu] 2
+Config: user
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_USER
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: true
+.SS --pikpak-pass
+.PP
+Pikpak password.
+.PP
+\f[B]NB\f[R] Input to this must be obscured - see rclone
+obscure (https://rclone.org/commands/rclone_obscure/).
+.PP
+Properties:
+.IP \[bu] 2
+Config: pass
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_PASS
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: true
+.SS Advanced options
+.PP
+Here are the Advanced options specific to pikpak (PikPak).
+.SS --pikpak-client-id
+.PP
+OAuth Client Id.
+.PP
+Leave blank normally.
+.PP
+Properties:
+.IP \[bu] 2
+Config: client_id
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_CLIENT_ID
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-client-secret
+.PP
+OAuth Client Secret.
+.PP
+Leave blank normally.
+.PP
+Properties:
+.IP \[bu] 2
+Config: client_secret
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-token
+.PP
+OAuth Access Token as a JSON blob.
+.PP
+Properties:
+.IP \[bu] 2
+Config: token
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_TOKEN
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-auth-url
+.PP
+Auth server URL.
+.PP
+Leave blank to use the provider defaults.
+.PP
+Properties:
+.IP \[bu] 2
+Config: auth_url
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_AUTH_URL
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-token-url
+.PP
+Token server URL.
+.PP
+Leave blank to use the provider defaults.
+.PP
+Properties:
+.IP \[bu] 2
+Config: token_url
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_TOKEN_URL
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-root-folder-id
+.PP
+ID of the root folder.
+Leave blank normally.
+.PP
+Fill in for rclone to use a non-root folder as its starting point.
+.PP
+Properties:
+.IP \[bu] 2
+Config: root_folder_id
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+.IP \[bu] 2
+Type: string
+.IP \[bu] 2
+Required: false
+.SS --pikpak-use-trash
+.PP
+Send files to the trash instead of deleting permanently.
+.PP
+Defaults to true, meaning files are sent to the trash.
+Use \f[C]--pikpak-use-trash=false\f[R] to delete files permanently
+instead.
+.PP
+Properties:
+.IP \[bu] 2
+Config: use_trash
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_USE_TRASH
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: true
+.SS --pikpak-trashed-only
+.PP
+Only show files that are in the trash.
+.PP
+This will show trashed files in their original directory structure.
+.PP
+Properties:
+.IP \[bu] 2
+Config: trashed_only
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
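+.PP
+For example, to list the contents of the trash (the remote name is
+illustrative):
+.IP
+.nf
+\f[C]
+rclone lsf --pikpak-trashed-only pikpak:
+\f[R]
+.fi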
+.SS --pikpak-hash-memory-limit
+.PP
+Files bigger than this will be cached on disk to calculate hash if
+required.
+.PP
+Properties:
+.IP \[bu] 2
+Config: hash_memory_limit
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+.IP \[bu] 2
+Type: SizeSuffix
+.IP \[bu] 2
+Default: 10Mi
+.SS --pikpak-encoding
+.PP
+The encoding for the backend.
+.PP
+See the encoding section in the
+overview (https://rclone.org/overview/#encoding) for more info.
+.PP
+Properties:
+.IP \[bu] 2
+Config: encoding
+.IP \[bu] 2
+Env Var: RCLONE_PIKPAK_ENCODING
+.IP \[bu] 2
+Type: MultiEncoder
+.IP \[bu] 2
+Default:
+Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+.SS Backend commands
+.PP
+Here are the commands specific to the pikpak backend.
+.PP
+Run them with
+.IP
+.nf
+\f[C]
+rclone backend COMMAND remote:
+\f[R]
+.fi
+.PP
+The help below will explain what arguments each command takes.
+.PP
+See the backend (https://rclone.org/commands/rclone_backend/) command
+for more info on how to pass options and arguments.
+.PP
+These can be run on a running backend using the rc command
+backend/command (https://rclone.org/rc/#backend-command).
+.SS addurl
+.PP
+Add an offline download task for a URL
+.IP
+.nf
+\f[C]
+rclone backend addurl remote: [options] [+]
+\f[R]
+.fi
+.PP
+This command adds an offline download task for a URL.
+.PP
+Usage:
+.IP
+.nf
+\f[C]
+rclone backend addurl pikpak:dirpath url
+\f[R]
+.fi
+.PP
+Downloads will be stored in \[aq]dirpath\[aq].
+If \[aq]dirpath\[aq] is invalid, the download will fall back to the
+default \[aq]My Pack\[aq] folder.
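+.PP
+For example (the directory and URL are illustrative):
+.IP
+.nf
+\f[C]
+rclone backend addurl pikpak:mydir https://example.com/archive.zip
+\f[R]
+.fi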
+.SS decompress
+.PP
+Request decompression of a file or files in a folder
+.IP
+.nf
+\f[C]
+rclone backend decompress remote: [options] [+]
+\f[R]
+.fi
+.PP
+This command requests decompression of a file or files in a folder.
+.PP
+Usage:
+.IP
+.nf
+\f[C]
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+\f[R]
+.fi
+.PP
+An optional argument \[aq]filename\[aq] can be specified for a file
+located in \[aq]pikpak:dirpath\[aq].
+You may want to pass \[aq]-o password=password\[aq] for
+password-protected files.
+Also, pass \[aq]-o delete-src-file\[aq] to delete source files after
+decompression finishes.
+.PP
+Result:
+.IP
+.nf
+\f[C]
+{
+ \[dq]Decompressed\[dq]: 17,
+ \[dq]SourceDeleted\[dq]: 0,
+ \[dq]Errors\[dq]: 0
+}
+\f[R]
+.fi
+.SS Limitations
+.SS Hashes
+.PP
+PikPak supports MD5 hashes, but they are sometimes empty, especially
+for user-uploaded files.
+.SS Deleted files
+.PP
+Deleted files will still be visible with \f[C]--pikpak-trashed-only\f[R]
+even after the trash is emptied.
+This goes away after a few days.
.SH premiumize.me
.PP
Paths are specified as \f[C]remote:path\f[R]
@@ -53861,7 +55202,7 @@ VAR1=value VAR2=value
\f[R]
.fi
.PP
-and pass variables with spaces in in quotes, eg
+and pass variables with spaces in quotes, eg
.IP
.nf
\f[C]
@@ -53956,6 +55297,33 @@ Env Var: RCLONE_SFTP_MACS
Type: SpaceSepList
.IP \[bu] 2
Default:
+.SS --sftp-host-key-algorithms
+.PP
+Space separated list of host key algorithms, ordered by preference.
+.PP
+At least one must match the server configuration.
+This can be checked, for example, with ssh -Q HostKeyAlgorithms.
+.PP
+Note: This can affect the outcome of key negotiation with the server
+even if server host key validation is not enabled.
+.PP
+Example:
+.IP
+.nf
+\f[C]
+ssh-ed25519 ssh-rsa ssh-dss
+\f[R]
+.fi
+.PP
+Properties:
+.IP \[bu] 2
+Config: host_key_algorithms
+.IP \[bu] 2
+Env Var: RCLONE_SFTP_HOST_KEY_ALGORITHMS
+.IP \[bu] 2
+Type: SpaceSepList
+.IP \[bu] 2
+Default:
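+.PP
+For example, to restrict the negotiation to two algorithms (the remote
+name and algorithm list are illustrative):
+.IP
+.nf
+\f[C]
+rclone lsd secureserver: --sftp-host-key-algorithms \[dq]ssh-ed25519 rsa-sha2-512\[dq]
+\f[R]
+.fi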
.SS Limitations
.PP
On some SFTP servers (e.g.
@@ -54012,7 +55380,7 @@ The first path segment must be the name of the share, which you entered
when you started to share on Windows.
On smbd, it\[aq]s the section title in \f[C]smb.conf\f[R] (usually in
\f[C]/etc/samba/\f[R]) file.
-You can find shares by quering the root if you\[aq]re unsure (e.g.
+You can find shares by querying the root if you\[aq]re unsure (e.g.
\f[C]rclone lsd remote:\f[R]).
.PP
You can\[aq]t access the shared printers from rclone, obviously.
@@ -55316,6 +56684,7 @@ rclone copy /home/source remote:backup
.SS Modified time and hashes
.PP
Uptobox supports neither modified times nor checksums.
+All timestamps will read as the time set by \f[C]--default-time\f[R].
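+.PP
+For example, to make listings report a fixed time of your choosing (the
+remote name and timestamp are illustrative):
+.IP
+.nf
+\f[C]
+rclone lsl uptobox: --default-time 2020-06-03T10:00:00Z
+\f[R]
+.fi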
.SS Restricted filename characters
.PP
In addition to the default restricted characters
@@ -55373,6 +56742,19 @@ Required: false
.SS Advanced options
.PP
Here are the Advanced options specific to uptobox (Uptobox).
+.SS --uptobox-private
+.PP
+Set to make uploaded files private.
+.PP
+Properties:
+.IP \[bu] 2
+Config: private
+.IP \[bu] 2
+Env Var: RCLONE_UPTOBOX_PRIVATE
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
.SS --uptobox-encoding
.PP
The encoding for the backend.
@@ -55913,17 +57295,19 @@ Choose a number from below, or type in your own value
url> https://example.com/remote.php/webdav/
Name of the WebDAV site/service/software you are using
Choose a number from below, or type in your own value
- 1 / Nextcloud
- \[rs] \[dq]nextcloud\[dq]
- 2 / Owncloud
- \[rs] \[dq]owncloud\[dq]
- 3 / Sharepoint Online, authenticated by Microsoft account.
- \[rs] \[dq]sharepoint\[dq]
- 4 / Sharepoint with NTLM authentication. Usually self-hosted or on-premises.
- \[rs] \[dq]sharepoint-ntlm\[dq]
- 5 / Other site/service or software
- \[rs] \[dq]other\[dq]
-vendor> 1
+ 1 / Fastmail Files
+ \[rs] (fastmail)
+ 2 / Nextcloud
+ \[rs] (nextcloud)
+ 3 / Owncloud
+ \[rs] (owncloud)
+ 4 / Sharepoint Online, authenticated by Microsoft account
+ \[rs] (sharepoint)
+ 5 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
+ \[rs] (sharepoint-ntlm)
+ 6 / Other site/service or software
+ \[rs] (other)
+vendor> 2
User name
user> user
Password.
@@ -55982,11 +57366,12 @@ rclone copy /home/source remote:backup
.SS Modified time and hashes
.PP
Plain WebDAV does not support modified times.
-However when used with Owncloud or Nextcloud rclone will support
-modified times.
+However when used with Fastmail Files, Owncloud or Nextcloud rclone will
+support modified times.
.PP
Likewise plain WebDAV does not support hashes, however when used with
-Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
+Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5
+hashes.
Depending on the exact version of Owncloud or Nextcloud hashes may
appear on all objects, or only on objects which had a hash uploaded with
them.
@@ -56026,6 +57411,12 @@ Required: false
Examples:
.RS 2
.IP \[bu] 2
+\[dq]fastmail\[dq]
+.RS 2
+.IP \[bu] 2
+Fastmail Files
+.RE
+.IP \[bu] 2
\[dq]nextcloud\[dq]
.RS 2
.IP \[bu] 2
@@ -56162,9 +57553,53 @@ Env Var: RCLONE_WEBDAV_HEADERS
Type: CommaSepList
.IP \[bu] 2
Default:
+.SS --webdav-pacer-min-sleep
+.PP
+Minimum time to sleep between API calls.
+.PP
+Properties:
+.IP \[bu] 2
+Config: pacer_min_sleep
+.IP \[bu] 2
+Env Var: RCLONE_WEBDAV_PACER_MIN_SLEEP
+.IP \[bu] 2
+Type: Duration
+.IP \[bu] 2
+Default: 10ms
+.SS --webdav-nextcloud-chunk-size
+.PP
+Nextcloud upload chunk size.
+.PP
+We recommend configuring your Nextcloud instance to increase the max
+chunk size to 1 GB for better upload performance.
+See
+https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
+.PP
+Set to 0 to disable chunked uploading.
+.PP
+Properties:
+.IP \[bu] 2
+Config: nextcloud_chunk_size
+.IP \[bu] 2
+Env Var: RCLONE_WEBDAV_NEXTCLOUD_CHUNK_SIZE
+.IP \[bu] 2
+Type: SizeSuffix
+.IP \[bu] 2
+Default: 10Mi
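+.PP
+For example, to use 1 GiB chunks to match a Nextcloud server configured
+as recommended above (the remote name is illustrative):
+.IP
+.nf
+\f[C]
+rclone copy /local/dir nextcloud:backup --webdav-nextcloud-chunk-size 1Gi
+\f[R]
+.fi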
.SS Provider notes
.PP
See below for notes on specific providers.
+.SS Fastmail Files
+.PP
+Use \f[C]https://webdav.fastmail.com/\f[R] or a subdirectory as the URL,
+and your Fastmail email \f[C]username\[at]domain.tld\f[R] as the
+username.
+Follow this
+documentation (https://www.fastmail.help/hc/en-us/articles/360058752854-App-passwords)
+to create an app password with access to \f[C]Files (WebDAV)\f[R] and
+use this as the password.
+.PP
+Fastmail supports modified times using the \f[C]X-OC-Mtime\f[R] header.
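+.PP
+A sketch of the resulting config file entry, with an illustrative
+username and the app password obscured via \f[C]rclone obscure\f[R]:
+.IP
+.nf
+\f[C]
+[fastmail]
+type = webdav
+url = https://webdav.fastmail.com/
+vendor = fastmail
+user = username\[at]domain.tld
+pass = <output of rclone obscure APP_PASSWORD>
+\f[R]
+.fi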
.SS Owncloud
.PP
Click on the settings cog in the bottom right of the page and this will
@@ -58046,6 +59481,525 @@ Options:
.IP \[bu] 2
\[dq]error\[dq]: return an error based on option value
.SH Changelog
+.SS v1.63.0 - 2023-06-30
+.PP
+See commits (https://github.com/rclone/rclone/compare/v1.62.0...v1.63.0)
+.IP \[bu] 2
+New backends
+.RS 2
+.IP \[bu] 2
+Pikpak (https://rclone.org/pikpak/) (wiserain)
+.IP \[bu] 2
+New S3 providers
+.RS 2
+.IP \[bu] 2
+petabox.io (https://rclone.org/s3/#petabox) (Andrei Smirnov)
+.IP \[bu] 2
+Google Cloud Storage (https://rclone.org/s3/#google-cloud-storage)
+(Anthony Pessy)
+.RE
+.IP \[bu] 2
+New WebDAV providers
+.RS 2
+.IP \[bu] 2
+Fastmail (https://rclone.org/webdav/#fastmail-files) (Arnavion)
+.RE
+.RE
+.IP \[bu] 2
+Major changes
+.RS 2
+.IP \[bu] 2
+Files will be copied to a temporary name ending in \f[C].partial\f[R]
+when copying to \f[C]local\f[R], \f[C]ftp\f[R] or \f[C]sftp\f[R], then
+renamed at the end of the transfer.
+(Janne Hellsten, Nick Craig-Wood)
+.RS 2
+.IP \[bu] 2
+This helps with data integrity as we don\[aq]t delete the existing file
+until the new one is complete.
+.IP \[bu] 2
+It can be disabled with the --inplace (https://rclone.org/docs/#inplace)
+flag.
+.IP \[bu] 2
+This behaviour will also happen if the backend is wrapped, for example
+\f[C]sftp\f[R] wrapped with \f[C]crypt\f[R].
+.RE
+.IP \[bu] 2
+The s3 (https://rclone.org/s3/#s3-directory-markers), azureblob and gcs
+backends now support directory markers so empty directories are
+supported (J\[u0101]nis Bebr\[u012B]tis, Nick Craig-Wood)
+.IP \[bu] 2
+The --default-time (https://rclone.org/docs/#default-time-time) flag now
+controls the unknown modification time of files/dirs (Nick Craig-Wood)
+.RS 2
+.IP \[bu] 2
+If a file or directory does not have a modification time rclone can
+read, then rclone will display this fixed time instead.
+.IP \[bu] 2
+For the old behaviour use \f[C]--default-time 0s\f[R] which will set
+this time to the time rclone started up.
+.RE
+.RE
+.IP \[bu] 2
+New Features
+.RS 2
+.IP \[bu] 2
+build
+.RS 2
+.IP \[bu] 2
+Modernise linters in use and fixup all affected code (albertony)
+.IP \[bu] 2
+Push docker beta to GHCR (GitHub container registry) (Richard Tweed)
+.RE
+.IP \[bu] 2
+cat: Add \f[C]--separator\f[R] option to cat command (Loren Gordon)
+.IP \[bu] 2
+config
+.RS 2
+.IP \[bu] 2
+Do not remove/overwrite other files during config file save (albertony)
+.IP \[bu] 2
+Do not overwrite config file symbolic link (albertony)
+.IP \[bu] 2
+Stop \f[C]config create\f[R] making invalid config files (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+doc updates (Adam K, Aditya Basu, albertony, asdffdsazqqq, Damo,
+danielkrajnik, Dimitri Papadopoulos, dlitster, Drew Parsons, jumbi77,
+kapitainsky, mac-15, Mariusz Suchodolski, Nick Craig-Wood, NickIAm,
+Rintze Zelle, Stanislav Gromov, Tareq Sharafy, URenko, yuudi, Zach Kipp)
+.IP \[bu] 2
+fs
+.RS 2
+.IP \[bu] 2
+Add \f[C]size\f[R] to JSON logs when moving or copying an object (Nick
+Craig-Wood)
+.IP \[bu] 2
+Allow boolean features to be enabled with \f[C]--disable !Feature\f[R]
+(Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+genautocomplete: Rename to \f[C]completion\f[R] with alias to the old
+name (Nick Craig-Wood)
+.IP \[bu] 2
+librclone: Added example on using \f[C]librclone\f[R] with Go (alankrit)
+.IP \[bu] 2
+lsjson: Make \f[C]--stat\f[R] more efficient (Nick Craig-Wood)
+.IP \[bu] 2
+operations
+.RS 2
+.IP \[bu] 2
+Implement \f[C]--multi-thread-write-buffer-size\f[R] for speed
+improvements on downloads (Paulo Schreiner)
+.IP \[bu] 2
+Reopen downloads on error when using \f[C]check --download\f[R] and
+\f[C]cat\f[R] (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+rc: \f[C]config/listremotes\f[R] includes remotes defined with
+environment variables (kapitainsky)
+.IP \[bu] 2
+selfupdate: Obey \f[C]--no-check-certificate\f[R] flag (Nick Craig-Wood)
+.IP \[bu] 2
+serve restic: Trigger systemd notify (Shyim)
+.IP \[bu] 2
+serve webdav: Implement owncloud checksum and modtime extensions
+(WeidiDeng)
+.IP \[bu] 2
+sync: \f[C]--suffix-keep-extension\f[R] preserve 2 part extensions like
+\&.tar.gz (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Bug Fixes
+.RS 2
+.IP \[bu] 2
+accounting
+.RS 2
+.IP \[bu] 2
+Fix Prometheus metrics to be the same as \f[C]core/stats\f[R] (Nick
+Craig-Wood)
+.IP \[bu] 2
+Bwlimit signal handler should always start (Sam Lai)
+.RE
+.IP \[bu] 2
+bisync: Fix \f[C]maxDelete\f[R] parameter being ignored via the rc (Nick
+Craig-Wood)
+.IP \[bu] 2
+cmd/ncdu: Fix screen corruption when logging (eNV25)
+.IP \[bu] 2
+filter: Fix deadlock with errors on \f[C]--files-from\f[R] (douchen)
+.IP \[bu] 2
+fs
+.RS 2
+.IP \[bu] 2
+Fix interaction between \f[C]--progress\f[R] and \f[C]--interactive\f[R]
+(Nick Craig-Wood)
+.IP \[bu] 2
+Fix infinite recursive call in pacer ModifyCalculator (fixes issue
+reported by the staticcheck linter) (albertony)
+.RE
+.IP \[bu] 2
+lib/atexit: Ensure OnError only calls cancel function once (Nick
+Craig-Wood)
+.IP \[bu] 2
+lib/rest: Fix problems re-using HTTP connections (Nick Craig-Wood)
+.IP \[bu] 2
+rc
+.RS 2
+.IP \[bu] 2
+Fix \f[C]operations/stat\f[R] with trailing \f[C]/\f[R] (Nick
+Craig-Wood)
+.IP \[bu] 2
+Fix missing \f[C]--rc\f[R] flags (Nick Craig-Wood)
+.IP \[bu] 2
+Fix output of Time values in \f[C]options/get\f[R] (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+serve dlna: Fix potential data race (Nick Craig-Wood)
+.IP \[bu] 2
+version: Fix reported os/kernel version for windows (albertony)
+.RE
+.IP \[bu] 2
+Mount
+.RS 2
+.IP \[bu] 2
+Add \f[C]--mount-case-insensitive\f[R] to force the mount to be case
+insensitive (Nick Craig-Wood)
+.IP \[bu] 2
+Removed unnecessary byte slice allocation for reads (Anagh Kumar
+Baranwal)
+.IP \[bu] 2
+Clarify rclone mount error when installed via homebrew (Nick Craig-Wood)
+.IP \[bu] 2
+Added _netdev to the example mount so it gets treated as a remote-fs
+rather than local-fs (Anagh Kumar Baranwal)
+.RE
+.IP \[bu] 2
+Mount2
+.RS 2
+.IP \[bu] 2
+Updated go-fuse version (Anagh Kumar Baranwal)
+.IP \[bu] 2
+Fixed statfs (Anagh Kumar Baranwal)
+.IP \[bu] 2
+Disable xattrs (Anagh Kumar Baranwal)
+.RE
+.IP \[bu] 2
+VFS
+.RS 2
+.IP \[bu] 2
+Add MkdirAll function to make a directory and all beneath (Nick
+Craig-Wood)
+.IP \[bu] 2
+Fix reload: failed to add virtual dir entry: file does not exist (Nick
+Craig-Wood)
+.IP \[bu] 2
+Fix writing to a read only directory creating spurious directory entries
+(WeidiDeng)
+.IP \[bu] 2
+Fix potential data race (Nick Craig-Wood)
+.IP \[bu] 2
+Fix backends being Shutdown too early when startup takes a long time
+(Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Local
+.RS 2
+.IP \[bu] 2
+Fix filtering of symlinks with \f[C]-l\f[R]/\f[C]--links\f[R] flag (Nick
+Craig-Wood)
+.IP \[bu] 2
+Fix /path/to/file.rclonelink when \f[C]-l\f[R]/\f[C]--links\f[R] is in
+use (Nick Craig-Wood)
+.IP \[bu] 2
+Fix crash with \f[C]--metadata\f[R] on Android (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Cache
+.RS 2
+.IP \[bu] 2
+Fix backends shutting down when in use when used via the rc (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+Crypt
+.RS 2
+.IP \[bu] 2
+Add \f[C]--crypt-suffix\f[R] option to set a custom suffix for encrypted
+files (jladbrook)
+.IP \[bu] 2
+Add \f[C]--crypt-pass-bad-blocks\f[R] to allow corrupted file output
+(Nick Craig-Wood)
+.IP \[bu] 2
+Fix reading 0 length files (Nick Craig-Wood)
+.IP \[bu] 2
+Try not to return \[dq]unexpected EOF\[dq] error (Nick Craig-Wood)
+.IP \[bu] 2
+Reduce allocations (albertony)
+.IP \[bu] 2
+Recommend Dropbox for \f[C]base32768\f[R] encoding (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Azure Blob
+.RS 2
+.IP \[bu] 2
+Empty directory markers (Nick Craig-Wood)
+.IP \[bu] 2
+Support azure workload identities (Tareq Sharafy)
+.IP \[bu] 2
+Fix azure blob uploads with multiple bits of metadata (Nick Craig-Wood)
+.IP \[bu] 2
+Fix azurite compatibility by sending nil tier if set to empty string
+(Roel Arents)
+.RE
+.IP \[bu] 2
+Combine
+.RS 2
+.IP \[bu] 2
+Implement missing methods (Nick Craig-Wood)
+.IP \[bu] 2
+Fix goroutine stack overflow on bad object (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Drive
+.RS 2
+.IP \[bu] 2
+Add \f[C]--drive-env-auth\f[R] to get IAM credentials from runtime
+(Peter Brunner)
+.IP \[bu] 2
+Update drive service account guide (Juang, Yi-Lin)
+.IP \[bu] 2
+Fix change notify picking up files outside the root (Nick Craig-Wood)
+.IP \[bu] 2
+Fix trailing slash mis-identification of folder as file (Nick Craig-Wood)
+.IP \[bu] 2
+Fix incorrect remote after Update on object (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Dropbox
+.RS 2
+.IP \[bu] 2
+Implement \f[C]--dropbox-pacer-min-sleep\f[R] flag (Nick Craig-Wood)
+.IP \[bu] 2
+Fix the dropbox batcher stalling (Misty)
+.RE
+.IP \[bu] 2
+Fichier
+.RS 2
+.IP \[bu] 2
+Add \f[C]--fichier-cdn\f[R] option to use the CDN for download (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+FTP
+.RS 2
+.IP \[bu] 2
+Lower log message priority when \f[C]SetModTime\f[R] is not supported to
+debug (Tobias Gion)
+.IP \[bu] 2
+Fix \[dq]unsupported LIST line\[dq] errors on startup (Nick Craig-Wood)
+.IP \[bu] 2
+Fix \[dq]501 Not a valid pathname.\[dq] errors when creating directories
+(Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Google Cloud Storage
+.RS 2
+.IP \[bu] 2
+Empty directory markers (J\[u0101]nis Bebr\[u012B]tis, Nick Craig-Wood)
+.IP \[bu] 2
+Added \f[C]--gcs-user-project\f[R] needed for requester pays
+(Christopher Merry)
+.RE
+.IP \[bu] 2
+HTTP
+.RS 2
+.IP \[bu] 2
+Add client certificate user auth middleware.
+This can auth \f[C]serve restic\f[R] from the username in the client
+cert.
+(Peter Fern)
+.RE
+.IP \[bu] 2
+Jottacloud
+.RS 2
+.IP \[bu] 2
+Fix vfs writeback stuck in a failed upload loop with file versioning
+disabled (albertony)
+.RE
+.IP \[bu] 2
+Onedrive
+.RS 2
+.IP \[bu] 2
+Add \f[C]--onedrive-av-override\f[R] flag to download files flagged as
+virus (Nick Craig-Wood)
+.IP \[bu] 2
+Fix quickxorhash on 32 bit architectures (Nick Craig-Wood)
+.IP \[bu] 2
+Report any list errors during \f[C]rclone cleanup\f[R] (albertony)
+.RE
+.IP \[bu] 2
+Putio
+.RS 2
+.IP \[bu] 2
+Fix uploading to the wrong object on Update with overridden remote name
+(Nick Craig-Wood)
+.IP \[bu] 2
+Fix modification times not being preserved for server side copy and move
+(Nick Craig-Wood)
+.IP \[bu] 2
+Fix server side copy failures (400 errors) (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+S3
+.RS 2
+.IP \[bu] 2
+Empty directory markers (J\[u0101]nis Bebr\[u012B]tis, Nick Craig-Wood)
+.IP \[bu] 2
+Update Scaleway storage classes (Brian Starkey)
+.IP \[bu] 2
+Fix \f[C]--s3-versions\f[R] on individual objects (Nick Craig-Wood)
+.IP \[bu] 2
+Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood)
+.IP \[bu] 2
+Fix missing \[dq]tier\[dq] metadata (Nick Craig-Wood)
+.IP \[bu] 2
+Fix V3sign: add missing subresource delete (cc)
+.IP \[bu] 2
+Fix Arvancloud Domain and region changes and alphabetise the provider
+(Ehsan Tadayon)
+.IP \[bu] 2
+Fix Qiniu KODO quirks virtualHostStyle is false (zzq)
+.RE
+.IP \[bu] 2
+SFTP
+.RS 2
+.IP \[bu] 2
+Add \f[C]--sftp-host-key-algorithms\f[R] to allow specifying SSH host
+key algorithms (Joel)
+.IP \[bu] 2
+Fix using \f[C]--sftp-key-use-agent\f[R] and \f[C]--sftp-key-file\f[R]
+together needing private key file (Arnav Singh)
+.IP \[bu] 2
+Fix move to allow overwriting existing files (Nick Craig-Wood)
+.IP \[bu] 2
+Don\[aq]t stat directories before listing them (Nick Craig-Wood)
+.IP \[bu] 2
+Don\[aq]t check remote points to a file if it ends with / (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+Sharefile
+.RS 2
+.IP \[bu] 2
+Disable streamed transfers as they no longer work (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Smb
+.RS 2
+.IP \[bu] 2
+Code cleanup to avoid overwriting ctx before first use (fixes issue
+reported by the staticcheck linter) (albertony)
+.RE
+.IP \[bu] 2
+Storj
+.RS 2
+.IP \[bu] 2
+Fix \[dq]uplink: too many requests\[dq] errors when uploading to the
+same file (Nick Craig-Wood)
+.IP \[bu] 2
+Fix uploading to the wrong object on Update with overridden remote name
+(Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Swift
+.RS 2
+.IP \[bu] 2
+Ignore 404 error when deleting an object (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Union
+.RS 2
+.IP \[bu] 2
+Implement missing methods (Nick Craig-Wood)
+.IP \[bu] 2
+Allow errors to be unwrapped for inspection (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Uptobox
+.RS 2
+.IP \[bu] 2
+Add \f[C]--uptobox-private\f[R] flag to make all uploaded files private
+(Nick Craig-Wood)
+.IP \[bu] 2
+Fix improper regex (Aaron Gokaslan)
+.IP \[bu] 2
+Fix Update returning the wrong object (Nick Craig-Wood)
+.IP \[bu] 2
+Fix rmdir declaring that directories weren\[aq]t empty (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+WebDAV
+.RS 2
+.IP \[bu] 2
+nextcloud: Add support for chunked uploads (Paul)
+.IP \[bu] 2
+Set modtime using propset for owncloud and nextcloud (WeidiDeng)
+.IP \[bu] 2
+Make pacer minSleep configurable with \f[C]--webdav-pacer-min-sleep\f[R]
+(ed)
+.IP \[bu] 2
+Fix server side copy/move not overwriting (WeidiDeng)
+.IP \[bu] 2
+Fix modtime on server side copy for owncloud and nextcloud (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+Yandex
+.RS 2
+.IP \[bu] 2
+Fix 400 Bad Request on transfer failure (Nick Craig-Wood)
+.RE
+.IP \[bu] 2
+Zoho
+.RS 2
+.IP \[bu] 2
+Fix downloads with \f[C]Range:\f[R] header returning the wrong data
+(Nick Craig-Wood)
+.RE
+.SS v1.62.2 - 2023-03-16
+.PP
+See commits (https://github.com/rclone/rclone/compare/v1.62.1...v1.62.2)
+.IP \[bu] 2
+Bug Fixes
+.RS 2
+.IP \[bu] 2
+docker volume plugin: Add missing fuse3 dependency (Nick Craig-Wood)
+.IP \[bu] 2
+docs: Fix size documentation (asdffdsazqqq)
+.RE
+.IP \[bu] 2
+FTP
+.RS 2
+.IP \[bu] 2
+Fix 426 errors on downloads with vsftpd (Lesmiscore)
+.RE
+.SS v1.62.1 - 2023-03-15
+.PP
+See commits (https://github.com/rclone/rclone/compare/v1.62.0...v1.62.1)
+.IP \[bu] 2
+Bug Fixes
+.RS 2
+.IP \[bu] 2
+docker: Add missing fuse3 dependency (cycneuramus)
+.IP \[bu] 2
+build: Update release docs to be more careful with the tag (Nick
+Craig-Wood)
+.IP \[bu] 2
+build: Set GitHub release to draft while uploading binaries (Nick
+Craig-Wood)
+.RE
.SS v1.62.0 - 2023-03-14
.PP
See commits (https://github.com/rclone/rclone/compare/v1.61.0...v1.62.0)
@@ -63476,9 +65430,9 @@ Crypt
Calculate hashes for uploads from local disk (Nick Craig-Wood)
.RS 2
.IP \[bu] 2
-This allows crypted Jottacloud uploads without using local disk
+This allows encrypted Jottacloud uploads without using local disk
.IP \[bu] 2
-This means crypted s3/b2 uploads will now have hashes
+This means encrypted s3/b2 uploads will now have hashes
.RE
.IP \[bu] 2
Added \f[C]rclone backend decode\f[R]/\f[C]encode\f[R] commands to
@@ -66766,7 +68720,7 @@ Fix root folder caching (Remus Bunduc)
Crypt
.RS 2
.IP \[bu] 2
-Check the crypted hash of files when uploading for extra data security
+Check the encrypted hash of files when uploading for extra data security
.RE
.IP \[bu] 2
Dropbox
@@ -67704,7 +69658,7 @@ New commands
.IP \[bu] 2
\f[C]tree\f[R] - shows a nicely formatted recursive listing
.IP \[bu] 2
-\f[C]cryptdecode\f[R] - decode crypted file names (thanks ishuah)
+\f[C]cryptdecode\f[R] - decode encrypted file names (thanks ishuah)
.IP \[bu] 2
\f[C]config show\f[R] - print the config file
.IP \[bu] 2
@@ -68464,7 +70418,7 @@ Delete src files which already existed in dst
Fix deletion of src file when dst file older
.RE
.IP \[bu] 2
-Fix \f[C]rclone check\f[R] on crypted file systems
+Fix \f[C]rclone check\f[R] on encrypted file systems
.IP \[bu] 2
Make failed uploads not count as \[dq]Transferred\[dq]
.IP \[bu] 2
@@ -69884,11 +71838,37 @@ ensure it is at version 233 or higher.
Previous releases contain a bug which causes not all domains to be
resolved properly.
.PP
-Additionally with the \f[C]GODEBUG=netdns=\f[R] environment variable the
-Go resolver decision can be influenced.
+The Go resolver decision can be influenced with the
+\f[C]GODEBUG=netdns=...\f[R] environment variable.
This can also resolve certain issues with DNS resolution.
+On Windows or macOS systems, try forcing use of the internal Go resolver
+by setting \f[C]GODEBUG=netdns=go\f[R] at runtime.
+On other systems (Linux, *BSD, etc.) try forcing use of the system name
+resolver by setting \f[C]GODEBUG=netdns=cgo\f[R] (and recompile rclone
+from source with CGO enabled if necessary).
See the name resolution section in the go
docs (https://golang.org/pkg/net/#hdr-Name_Resolution).
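+.PP
+For example, to try the internal Go resolver for a single invocation
+(the remote name is illustrative):
+.IP
+.nf
+\f[C]
+GODEBUG=netdns=go rclone lsd remote:
+\f[R]
+.fi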
+.SS Failed to start auth webserver on Windows
+.IP
+.nf
+\f[C]
+Error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+\&...
+yyyy/mm/dd hh:mm:ss Fatal error: config failed to refresh token: failed to start auth webserver: listen tcp 127.0.0.1:53682: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
+\f[R]
+.fi
+.PP
+This is sometimes caused by the Host Network Service interfering with
+opening the port on the host.
+.PP
+A simple solution may be restarting the Host Network Service with, for
+example, PowerShell:
+.IP
+.nf
+\f[C]
+Restart-Service hns
+\f[R]
+.fi
.SS The total size reported in the stats for a sync is wrong and keeps changing
.PP
It is likely you have more than 10,000 files that need to be synced.
@@ -69968,7 +71948,7 @@ Nick Craig-Wood
.SS Contributors
.PP
{{< rem
-\f[C]email addresses removed from here need to be addeed to bin/.ignore-emails to make sure update-authors.py doesn\[aq]t immediately put them back in again.\f[R]
+\f[C]email addresses removed from here need to be added to bin/.ignore-emails to make sure update-authors.py doesn\[aq]t immediately put them back in again.\f[R]
>}}
.IP \[bu] 2
Alex Couper
@@ -71127,7 +73107,7 @@ Zsolt Ero
.IP \[bu] 2
Lesmiscore
.IP \[bu] 2
-ehsantdy
+ehsantdy
.IP \[bu] 2
SwazRGB <65694696+swazrgb@users.noreply.github.com>
.IP \[bu] 2
@@ -71147,6 +73127,8 @@ Erik van Velzen
.IP \[bu] 2
Derek Battams
.IP \[bu] 2
+Paul
+.IP \[bu] 2
SimonLiu
.IP \[bu] 2
Hugo Laloge
@@ -71345,6 +73327,90 @@ Peter Brunner
Leandro Sacchet
.IP \[bu] 2
dependabot[bot] <49699333+dependabot[bot]\[at]users.noreply.github.com>
+.IP \[bu] 2
+cycneuramus <56681631+cycneuramus@users.noreply.github.com>
+.IP \[bu] 2
+Arnavion
+.IP \[bu] 2
+Christopher Merry
+.IP \[bu] 2
+Thibault Coupin
+.IP \[bu] 2
+Richard Tweed
+.IP \[bu] 2
+Zach Kipp
+.IP \[bu] 2
+yuudi <26199752+yuudi@users.noreply.github.com>
+.IP \[bu] 2
+NickIAm
+.IP \[bu] 2
+Juang, Yi-Lin
+.IP \[bu] 2
+jumbi77
+.IP \[bu] 2
+Aditya Basu
+.IP \[bu] 2
+ed
+.IP \[bu] 2
+Drew Parsons
+.IP \[bu] 2
+Joel
+.IP \[bu] 2
+wiserain
+.IP \[bu] 2
+Roel Arents
+.IP \[bu] 2
+Shyim
+.IP \[bu] 2
+Rintze Zelle <78232505+rzelle-lallemand@users.noreply.github.com>
+.IP \[bu] 2
+Damo
+.IP \[bu] 2
+WeidiDeng
+.IP \[bu] 2
+Brian Starkey
+.IP \[bu] 2
+jladbrook
+.IP \[bu] 2
+Loren Gordon
+.IP \[bu] 2
+dlitster
+.IP \[bu] 2
+Tobias Gion
+.IP \[bu] 2
+J\[u0101]nis Bebr\[u012B]tis
+.IP \[bu] 2
+Adam K
+.IP \[bu] 2
+Andrei Smirnov
+.IP \[bu] 2
+Janne Hellsten
+.IP \[bu] 2
+cc <12904584+shvc@users.noreply.github.com>
+.IP \[bu] 2
+Tareq Sharafy
+.IP \[bu] 2
+kapitainsky
+.IP \[bu] 2
+douchen
+.IP \[bu] 2
+Sam Lai <70988+slai@users.noreply.github.com>
+.IP \[bu] 2
+URenko <18209292+URenko@users.noreply.github.com>
+.IP \[bu] 2
+Stanislav Gromov
+.IP \[bu] 2
+Paulo Schreiner
+.IP \[bu] 2
+Mariusz Suchodolski
+.IP \[bu] 2
+danielkrajnik
+.IP \[bu] 2
+Peter Fern
+.IP \[bu] 2
+zzq
+.IP \[bu] 2
+mac-15
.SH Contact the rclone project
.SS Forum
.PP