forked from TrueCloudLab/rclone

Compare commits: tcl/master...add-xxh-bl (136 commits)

Commits (SHA1):
87124ff6c3
0eecebfdb5
53a29fadf5
a137c6da2f
3cb22501ac
203f115661
a6273a904d
a78bc093de
2446c4928d
e11e679e90
ba8e538173
40111ba5e1
ab58ae5b03
ca8860177e
d65d1a44b3
c1763a3f95
964fcd5f59
c6281a1217
ff3f8f0b33
2d844a26c3
1b68492c85
acd5a893e2
0214a59a8c
6079cab090
bf57087a6e
d8bc542ffc
01ccf204f4
84b64dcdf9
8cc1020a58
1e2b354456
f639cd9c78
e50f995d87
abe884e744
173b2ac956
1317fdb9b8
1072173d58
df19c6f7bf
ee72554fb9
abb4f77568
ca2b27422f
740f6b318c
f307d929a8
ceea6753ee
2bafbf3c04
3e14ba54b8
2f7a30cf61
0ad925278d
e3053350f3
b9207e5727
40159e7a16
16baa24964
72f06bcc4b
c527dd8c9c
29fd894189
175aa07cdd
75257fc9cd
53ff3b3b32
8b4b59412d
264c9fb2c0
1b10cd3732
d97492cbc3
82a510e793
9f2c590e13
11a90917ec
8ca7b2af07
a19ddffe92
3e2c0f8c04
589458d1fe
69897b97fb
4db09331c6
fcd3b88332
1ca3f12672
e7a0fd0f70
c23c59544d
9dec3de990
5caa695c79
8400809900
e49516d5f4
9614fc60f2
51db76fd47
17e7ccfad5
8a6fc8535d
c053429b9c
18989fbf85
a7451c6a77
5147d1101c
11ad2a1316
3c7ad8d961
a3e8fb584a
9b4b3033da
94997d25d2
19458e8459
7d32da441e
22e13eea47
de9b593f02
b2b4f8196c
84cebb6872
cb9f4f8461
498d9cfa85
109e4ed0ed
353270263a
f8d782c02d
3dec664a19
a849fd59f0
462a1cf491
0b7b3cacdc
976103d50b
192524c004
28667f58bf
c669f4e218
1a9e6a527d
8c48cadd9c
76e1ba8c46
232e4cd18f
88141928f2
a2a0388036
48543d38e8
eceb390152
f4deffdc96
c172742cef
7daed30754
b1b4c7f27b
ed84553dc1
c94edbb76b
2dcb327bc0
874d66658e
3af757e26d
fef1b61585
3fca7a60a5
6b3f41fa0c
3d0ee47aa2
da70088b11
1bc9b94cf2
15a026d3be
ad122c6f6f
b155231cdd
149 changed files with 6097 additions and 10504 deletions
.github/workflows/build.yml (vendored): 10 lines changed
@@ -17,12 +17,11 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
-        required: true
         default: true
 
 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
     strategy:
       fail-fast: false
@@ -124,7 +123,8 @@ jobs:
         sudo modprobe fuse
         sudo chmod 666 /dev/fuse
         sudo chown root:$USER /etc/fuse.conf
-        sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+        sudo apt-get update
+        sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
       if: matrix.os == 'ubuntu-latest'
 
     - name: Install Libraries on macOS
@@ -217,7 +217,7 @@
       if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -296,7 +296,7 @@
       run: govulncheck ./...
 
   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
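Aside on the `if:` changes above: `workflow_dispatch` inputs declared with `type: boolean` arrive in the expression context as real booleans, so `inputs.manual` can replace the old string comparison against `github.event.inputs.manual`. A minimal sketch of exercising the input, assuming the GitHub CLI is installed and the workflow file is named build.yml:

    # Trigger a manual run of the workflow; the typed boolean input
    # is then available in expressions as a real boolean via inputs.manual
    gh workflow run build.yml -f manual=true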
@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
 - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   - make sure this has the `autogenerated options` comments in (see your reference backend docs)
   - update them in your backend with `bin/make_backend_docs.py remote`
-- `docs/content/overview.md` - overview docs
+- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
 - `docs/content/docs.md` - list of remotes in config section
 - `docs/content/_index.md` - front page of rclone.org
 - `docs/layouts/chrome/navbar.html` - add it to the website navigation
MANUAL.html (generated): 336 lines changed
@@ -81,7 +81,7 @@
 <header id="title-block-header">
 <h1 class="title">rclone(1) User Manual</h1>
 <p class="author">Nick Craig-Wood</p>
-<p class="date">Nov 15, 2024</p>
+<p class="date">Sep 08, 2024</p>
 </header>
 <h1 id="rclone-syncs-your-files-to-cloud-storage">Rclone syncs your files to cloud storage</h1>
 <p><img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" ></p>
@@ -2964,9 +2964,7 @@ rclone mount remote:path/to/files \\cloud\remote</code></pre>
 <p>When running in background mode the user will have to stop the mount manually:</p>
 <pre><code># Linux
 fusermount -u /path/to/local/mount
-#... or on some systems
-fusermount3 -u /path/to/local/mount
-# OS X or Linux when using nfsmount
+# OS X
 umount /path/to/local/mount</code></pre>
 <p>The umount operation can fail, for example when the mountpoint is busy. When that happens, it is the user's responsibility to stop the mount manually.</p>
 <p>The size of the mounted file system will be set according to information retrieved from the remote, the same as returned by the <a href="https://rclone.org/commands/rclone_about/">rclone about</a> command. Remotes with unlimited storage may report the used size only, then an additional 1 PiB of free space is assumed. If the remote does not <a href="https://rclone.org/overview/#optional-features">support</a> the about feature at all, then 1 PiB is set as both the total and the free size.</p>
@@ -3050,7 +3048,7 @@ sudo ln -s /opt/local/lib/libfuse.2.dylib</code></pre>
 <p>Note that all the rclone filters can be used to select a subset of the files to be visible in the mount.</p>
 <h2 id="systemd">systemd</h2>
 <p>When running rclone mount as a systemd service, it is possible to use Type=notify. In this case the service will enter the started state after the mountpoint has been successfully set up. Units having the rclone mount service specified as a requirement will see all files and folders immediately in this mode.</p>
-<p>Note that systemd runs mount units without any environment variables including <code>PATH</code> or <code>HOME</code>. This means that tilde (<code>~</code>) expansion will not work and you should provide <code>--config</code> and <code>--cache-dir</code> explicitly as absolute paths via rclone arguments. Since mounting requires the <code>fusermount</code> or <code>fusermount3</code> program, rclone will use the fallback PATH of <code>/bin:/usr/bin</code> in this scenario. Please ensure that <code>fusermount</code>/<code>fusermount3</code> is present on this PATH.</p>
+<p>Note that systemd runs mount units without any environment variables including <code>PATH</code> or <code>HOME</code>. This means that tilde (<code>~</code>) expansion will not work and you should provide <code>--config</code> and <code>--cache-dir</code> explicitly as absolute paths via rclone arguments. Since mounting requires the <code>fusermount</code> program, rclone will use the fallback PATH of <code>/bin:/usr/bin</code> in this scenario. Please ensure that <code>fusermount</code> is present on this PATH.</p>
 <h2 id="rclone-as-unix-mount-helper">Rclone as Unix mount helper</h2>
 <p>The core Unix program <code>/bin/mount</code> normally takes the <code>-t FSTYPE</code> argument then runs the <code>/sbin/mount.FSTYPE</code> helper program passing it mount options as <code>-o key=val,...</code> or <code>--opt=...</code>. Automount (classic or systemd) behaves in a similar way.</p>
 <p>rclone by default expects GNU-style flags <code>--key val</code>. To run it as a mount helper you should symlink rclone binary to <code>/sbin/mount.rclone</code> and optionally <code>/usr/bin/rclonefs</code>, e.g. <code>ln -s /usr/bin/rclone /sbin/mount.rclone</code>. rclone will detect it and translate command-line arguments appropriately.</p>
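The mount-helper paragraphs in this hunk are unchanged context; a minimal sketch of what they describe, with placeholder remote and mountpoint names:

    # One-time setup: expose rclone as a mount.FSTYPE helper
    sudo ln -s /usr/bin/rclone /sbin/mount.rclone

    # /bin/mount now accepts an "rclone" fstype and passes
    # rclone options through in -o key=val form
    sudo mount -t rclone remote:path/to/files /mnt/data -o config=/etc/rclone.conf,vfs-cache-mode=writes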
@@ -3484,9 +3482,7 @@ rclone nfsmount remote:path/to/files \\cloud\remote</code></pre>
 <p>When running in background mode the user will have to stop the mount manually:</p>
 <pre><code># Linux
 fusermount -u /path/to/local/mount
-#... or on some systems
-fusermount3 -u /path/to/local/mount
-# OS X or Linux when using nfsmount
+# OS X
 umount /path/to/local/mount</code></pre>
 <p>The umount operation can fail, for example when the mountpoint is busy. When that happens, it is the user's responsibility to stop the mount manually.</p>
 <p>The size of the mounted file system will be set according to information retrieved from the remote, the same as returned by the <a href="https://rclone.org/commands/rclone_about/">rclone about</a> command. Remotes with unlimited storage may report the used size only, then an additional 1 PiB of free space is assumed. If the remote does not <a href="https://rclone.org/overview/#optional-features">support</a> the about feature at all, then 1 PiB is set as both the total and the free size.</p>
@@ -3570,7 +3566,7 @@ sudo ln -s /opt/local/lib/libfuse.2.dylib</code></pre>
 <p>Note that all the rclone filters can be used to select a subset of the files to be visible in the mount.</p>
 <h2 id="systemd-1">systemd</h2>
 <p>When running rclone nfsmount as a systemd service, it is possible to use Type=notify. In this case the service will enter the started state after the mountpoint has been successfully set up. Units having the rclone nfsmount service specified as a requirement will see all files and folders immediately in this mode.</p>
-<p>Note that systemd runs mount units without any environment variables including <code>PATH</code> or <code>HOME</code>. This means that tilde (<code>~</code>) expansion will not work and you should provide <code>--config</code> and <code>--cache-dir</code> explicitly as absolute paths via rclone arguments. Since mounting requires the <code>fusermount</code> or <code>fusermount3</code> program, rclone will use the fallback PATH of <code>/bin:/usr/bin</code> in this scenario. Please ensure that <code>fusermount</code>/<code>fusermount3</code> is present on this PATH.</p>
+<p>Note that systemd runs mount units without any environment variables including <code>PATH</code> or <code>HOME</code>. This means that tilde (<code>~</code>) expansion will not work and you should provide <code>--config</code> and <code>--cache-dir</code> explicitly as absolute paths via rclone arguments. Since mounting requires the <code>fusermount</code> program, rclone will use the fallback PATH of <code>/bin:/usr/bin</code> in this scenario. Please ensure that <code>fusermount</code> is present on this PATH.</p>
 <h2 id="rclone-as-unix-mount-helper-1">Rclone as Unix mount helper</h2>
 <p>The core Unix program <code>/bin/mount</code> normally takes the <code>-t FSTYPE</code> argument then runs the <code>/sbin/mount.FSTYPE</code> helper program passing it mount options as <code>-o key=val,...</code> or <code>--opt=...</code>. Automount (classic or systemd) behaves in a similar way.</p>
 <p>rclone by default expects GNU-style flags <code>--key val</code>. To run it as a mount helper you should symlink rclone binary to <code>/sbin/mount.rclone</code> and optionally <code>/usr/bin/rclonefs</code>, e.g. <code>ln -s /usr/bin/rclone /sbin/mount.rclone</code>. rclone will detect it and translate command-line arguments appropriately.</p>
@@ -4060,7 +4056,7 @@ htpasswd -B htpasswd anotherUser</code></pre>
 <h3 id="rc-options">RC Options</h3>
 <p>Flags to control the Remote Control API</p>
 <pre><code> --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
 --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
 --rc-baseurl string Prefix for URLs - leave blank for root
 --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
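Both sides of the `--rc-addr` change document a `stringArray` flag, which may be given more than once; a sketch of binding the remote control server to two addresses (the addresses are illustrative):

    # rclone rcd runs the remote control API on its own;
    # repeating a stringArray flag binds several addresses
    rclone rcd --rc-addr localhost:5572 --rc-addr 127.0.0.1:5573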
@@ -7853,38 +7849,6 @@ export RCLONE_CONFIG_PASS</code></pre>
 <p>Verbosity is slightly different, the environment variable equivalent of <code>--verbose</code> or <code>-v</code> is <code>RCLONE_VERBOSE=1</code>, or for <code>-vv</code>, <code>RCLONE_VERBOSE=2</code>.</p>
 <p>The same parser is used for the options and the environment variables so they take exactly the same form.</p>
 <p>The options set by environment variables can be seen with the <code>-vv</code> flag, e.g. <code>rclone version -vv</code>.</p>
-<p>Options that can appear multiple times (type <code>stringArray</code>) are treated slighly differently as environment variables can only be defined once. In order to allow a simple mechanism for adding one or many items, the input is treated as a <a href="https://godoc.org/encoding/csv">CSV encoded</a> string. For example</p>
-<table>
-<colgroup>
-<col style="width: 52%" />
-<col style="width: 47%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Environment Variable</th>
-<th>Equivalent options</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><code>RCLONE_EXCLUDE="*.jpg"</code></td>
-<td><code>--exclude "*.jpg"</code></td>
-</tr>
-<tr class="even">
-<td><code>RCLONE_EXCLUDE="*.jpg,*.png"</code></td>
-<td><code>--exclude "*.jpg"</code> <code>--exclude "*.png"</code></td>
-</tr>
-<tr class="odd">
-<td><code>RCLONE_EXCLUDE='"*.jpg","*.png"'</code></td>
-<td><code>--exclude "*.jpg"</code> <code>--exclude "*.png"</code></td>
-</tr>
-<tr class="even">
-<td><code>RCLONE_EXCLUDE='"/directory with comma , in it /**"'</code></td>
-<td>`--exclude "/directory with comma , in it /**"</td>
-</tr>
-</tbody>
-</table>
-<p>If <code>stringArray</code> options are defined as environment variables <strong>and</strong> options on the command line then all the values will be used.</p>
 <h3 id="config-file">Config file</h3>
 <p>You can set defaults for values in the config file on an individual remote basis. The names of the config items are documented in the page for each backend.</p>
 <p>To find the name of the environment variable, you need to set, take <code>RCLONE_CONFIG_</code> + name of remote + <code>_</code> + name of config file option and make it all uppercase. Note one implication here is the remote's name must be convertible into a valid environment variable name, so it can only contain letters, digits, or the <code>_</code> (underscore) character.</p>
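The removed table pairs each environment variable with its flag equivalent; the same CSV rule as a runnable sketch:

    # One CSV-encoded variable expands to two --exclude flags
    export RCLONE_EXCLUDE='"*.jpg","*.png"'
    # per the context above, -vv shows options picked up from the environment
    rclone version -vv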
@@ -8323,14 +8287,12 @@ file2.avi</code></pre>
 <p>Adds path/file names to an rclone command based on rules in a named file. The file contains a list of remarks and pattern rules. Include rules start with <code>+</code> and exclude rules with <code>-</code>. <code>!</code> clears existing rules. Rules are processed in the order they are defined.</p>
 <p>This flag can be repeated. See above for the order filter flags are processed in.</p>
 <p>Arrange the order of filter rules with the most restrictive first and work down.</p>
-<p>Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. <em>Use <code>-vv --dump filters</code> to see how they appear in the final regexp.</em></p>
 <p>E.g. for <code>filter-file.txt</code>:</p>
 <pre><code># a sample filter rule file
 - secret*.jpg
 + *.jpg
 + *.png
 + file2.avi
-- /dir/tmp/** # WARNING! This text will be treated as part of the path.
 - /dir/Trash/**
 + /dir/**
 # exclude everything else
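The removed note's own suggestion can be run directly to see how the sample rules compile (remote: is a placeholder):

    # List a remote through the sample rules and dump the compiled filters
    rclone ls remote: --filter-from filter-file.txt -vv --dump filters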
@@ -10420,7 +10382,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 <tr class="odd">
 <td>pCloud</td>
 <td style="text-align: center;">MD5, SHA1 ⁷</td>
-<td style="text-align: center;">R/W</td>
+<td style="text-align: center;">R</td>
 <td style="text-align: center;">No</td>
 <td style="text-align: center;">No</td>
 <td style="text-align: center;">W</td>
@@ -11970,7 +11932,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 --tpslimit float Limit HTTP transactions per second to this
 --tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
 --use-cookies Enable session cookiejar
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2")</code></pre>
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")</code></pre>
 <h2 id="performance">Performance</h2>
 <p>Flags helpful for increasing performance.</p>
 <pre><code> --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
@@ -12071,7 +12033,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 <h2 id="rc-1">RC</h2>
 <p>Flags to control the Remote Control API.</p>
 <pre><code> --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
 --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
 --rc-baseurl string Prefix for URLs - leave blank for root
 --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
@@ -12101,7 +12063,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 --rc-web-gui-update Check and update to latest version of web gui</code></pre>
 <h2 id="metrics-1">Metrics</h2>
 <p>Flags to control the Metrics HTTP endpoint..</p>
-<pre><code> --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
+<pre><code> --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
 --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
 --metrics-baseurl string Prefix for URLs - leave blank for root
 --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
@@ -12589,18 +12551,21 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 --pcloud-token string OAuth Access Token as a JSON blob
 --pcloud-token-url string Token server url
 --pcloud-username string Your pcloud username
+ --pikpak-auth-url string Auth server URL
 --pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
+ --pikpak-client-id string OAuth Client Id
+ --pikpak-client-secret string OAuth Client Secret
 --pikpak-description string Description of the remote
- --pikpak-device-id string Device ID used for authorization
 --pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
 --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
 --pikpak-pass string Pikpak password (obscured)
 --pikpak-root-folder-id string ID of the root folder
+ --pikpak-token string OAuth Access Token as a JSON blob
+ --pikpak-token-url string Token server url
 --pikpak-trashed-only Only show files that are in the trash
 --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
 --pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
 --pikpak-user string Pikpak username
- --pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
 --pixeldrain-api-key string API key for your pixeldrain account
 --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
 --pixeldrain-description string Description of the remote
@@ -14564,18 +14529,6 @@ y/e/d></code></pre>
 <p>By default, rclone will HEAD every object it uploads. It does this to check the object got uploaded correctly.</p>
 <p>You can disable this with the <a href="#s3-no-head">--s3-no-head</a> option - see there for more details.</p>
 <p>Setting this flag increases the chance for undetected upload failures.</p>
-<h3 id="increasing-performance">Increasing performance</h3>
-<h4 id="using-server-side-copy">Using server-side copy</h4>
-<p>If you are copying objects between S3 buckets in the same region, you should use server-side copy. This is much faster than downloading and re-uploading the objects, as no data is transferred.</p>
-<p>For rclone to use server-side copy, you must use the same remote for the source and destination.</p>
-<pre><code>rclone copy s3:source-bucket s3:destination-bucket</code></pre>
-<p>When using server-side copy, the performance is limited by the rate at which rclone issues API requests to S3. See below for how to increase the number of API requests rclone makes.</p>
-<h4 id="increasing-the-rate-of-api-requests">Increasing the rate of API requests</h4>
-<p>You can increase the rate of API requests to S3 by increasing the parallelism using <code>--transfers</code> and <code>--checkers</code> options.</p>
-<p>Rclone uses a very conservative defaults for these settings, as not all providers support high rates of requests. Depending on your provider, you can increase significantly the number of transfers and checkers.</p>
-<p>For example, with AWS S3, if you can increase the number of checkers to values like 200. If you are doing a server-side copy, you can also increase the number of transfers to 200.</p>
-<pre><code>rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket</code></pre>
-<p>You will need to experiment with these values to find the optimal settings for your setup.</p>
 <h3 id="versions">Versions</h3>
 <p>When bucket versioning is enabled (this can be done with rclone with the <a href="#versioning"><code>rclone backend versioning</code></a> command) when rclone uploads a new version of a file it creates a <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html">new version of it</a> Likewise when you delete a file, the old version will be marked hidden and still be available.</p>
 <p>Old versions of files, where available, are visible using the <a href="#s3-versions"><code>--s3-versions</code></a> flag.</p>
@@ -17170,7 +17123,7 @@ acl = private
 upload_cutoff = 5M
 chunk_size = 5M
 copy_cutoff = 5M</code></pre>
-<p><a href="https://www.scaleway.com/en/glacier-cold-storage/">Scaleway Glacier</a> is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" <code>storage_class</code>. So you can configure your remote with the <code>storage_class = GLACIER</code> option to upload directly to Scaleway Glacier. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)</p>
+<p><a href="https://www.online.net/en/storage/c14-cold-storage">C14 Cold Storage</a> is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" <code>storage_class</code>. So you can configure your remote with the <code>storage_class = GLACIER</code> option to upload directly to C14. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)</p>
 <h3 id="lyve">Seagate Lyve Cloud</h3>
 <p><a href="https://www.seagate.com/gb/en/services/cloud/storage/">Seagate Lyve Cloud</a> is an S3 compatible object storage platform from <a href="https://seagate.com/">Seagate</a> intended for enterprise use.</p>
 <p>Here is a config run through for a remote called <code>remote</code> - you may choose a different name of course. Note that to create an access key and secret key you will need to create a service account first.</p>
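Both versions of the changed paragraph describe the same configuration step; a sketch of creating such a remote non-interactively (remote name, region and endpoint are illustrative):

    # storage_class = GLACIER uploads straight to cold storage; objects
    # must be restored to STANDARD before they can be read back
    rclone config create scw s3 provider=Scaleway region=fr-par endpoint=s3.fr-par.scw.cloud storage_class=GLACIER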
@@ -18369,7 +18322,7 @@ cos s3</code></pre>
 <p>For Netease NOS configure as per the configurator <code>rclone config</code> setting the provider <code>Netease</code>. This will automatically set <code>force_path_style = false</code> which is necessary for it to run properly.</p>
 <h3 id="petabox">Petabox</h3>
 <p>Here is an example of making a <a href="https://petabox.io/">Petabox</a> configuration. First run:</p>
-<div class="sourceCode" id="cb969"><pre class="sourceCode bash"><code class="sourceCode bash"><span id="cb969-1"><a href="#cb969-1" aria-hidden="true"></a><span class="ex">rclone</span> config</span></code></pre></div>
+<div class="sourceCode" id="cb967"><pre class="sourceCode bash"><code class="sourceCode bash"><span id="cb967-1"><a href="#cb967-1" aria-hidden="true"></a><span class="ex">rclone</span> config</span></code></pre></div>
 <p>This will guide you through an interactive setup process.</p>
 <pre><code>No remotes found, make a new one?
 n) New remote
@@ -24615,7 +24568,7 @@ rclone backend copyid drive: ID1 path1 ID2 path2</code></pre>
 <li><p>Click on the "+ CREATE CREDENTIALS" button at the top of the screen, then select "OAuth client ID".</p></li>
 <li><p>Choose an application type of "Desktop app" and click "Create". (the default name is fine)</p></li>
 <li><p>It will show you a client ID and client secret. Make a note of these.</p>
-<p>(If you selected "External" at Step 5 continue to Step 10. If you chose "Internal" you don't need to publish and can skip straight to Step 11 but your destination drive must be part of the same Google Workspace.)</p></li>
+<p>(If you selected "External" at Step 5 continue to Step 9. If you chose "Internal" you don't need to publish and can skip straight to Step 10 but your destination drive must be part of the same Google Workspace.)</p></li>
 <li><p>Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm. You will also want to add yourself as a test user.</p></li>
 <li><p>Provide the noted client ID and client secret to rclone.</p></li>
 </ol>
@@ -29707,75 +29660,75 @@ rclone rc vfs/refresh recursive=true</code></pre>
 <p>Permissions are also supported, if <code>--onedrive-metadata-permissions</code> is set. The accepted values for <code>--onedrive-metadata-permissions</code> are "<code>read</code>", "<code>write</code>", "<code>read,write</code>", and "<code>off</code>" (the default). "<code>write</code>" supports adding new permissions, updating the "role" of existing permissions, and removing permissions. Updating and removing require the Permission ID to be known, so it is recommended to use "<code>read,write</code>" instead of "<code>write</code>" if you wish to update/remove permissions.</p>
 <p>Permissions are read/written in JSON format using the same schema as the <a href="https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online">OneDrive API</a>, which differs slightly between OneDrive Personal and Business.</p>
 <p>Example for OneDrive Personal:</p>
-<div class="sourceCode" id="cb1260"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1260-1"><a href="#cb1260-1" aria-hidden="true"></a><span class="ot">[</span></span>
-<span id="cb1260-2"><a href="#cb1260-2" aria-hidden="true"></a> <span class="fu">{</span></span>
-<span id="cb1260-3"><a href="#cb1260-3" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"1234567890ABC!123"</span><span class="fu">,</span></span>
-<span id="cb1260-4"><a href="#cb1260-4" aria-hidden="true"></a> <span class="dt">"grantedTo"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1260-5"><a href="#cb1260-5" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1260-6"><a href="#cb1260-6" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
-<span id="cb1260-7"><a href="#cb1260-7" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1260-8"><a href="#cb1260-8" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
-<span id="cb1260-9"><a href="#cb1260-9" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
-<span id="cb1260-10"><a href="#cb1260-10" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1260-11"><a href="#cb1260-11" aria-hidden="true"></a> <span class="dt">"invitation"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1260-12"><a href="#cb1260-12" aria-hidden="true"></a> <span class="dt">"email"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
-<span id="cb1260-13"><a href="#cb1260-13" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1260-14"><a href="#cb1260-14" aria-hidden="true"></a> <span class="dt">"link"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1260-15"><a href="#cb1260-15" aria-hidden="true"></a> <span class="dt">"webUrl"</span><span class="fu">:</span> <span class="st">"https://1drv.ms/t/s!1234567890ABC"</span></span>
-<span id="cb1260-16"><a href="#cb1260-16" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1260-17"><a href="#cb1260-17" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
-<span id="cb1260-18"><a href="#cb1260-18" aria-hidden="true"></a> <span class="st">"read"</span></span>
-<span id="cb1260-19"><a href="#cb1260-19" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
-<span id="cb1260-20"><a href="#cb1260-20" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"s!1234567890ABC"</span></span>
-<span id="cb1260-21"><a href="#cb1260-21" aria-hidden="true"></a> <span class="fu">}</span></span>
-<span id="cb1260-22"><a href="#cb1260-22" aria-hidden="true"></a><span class="ot">]</span></span></code></pre></div>
+<div class="sourceCode" id="cb1258"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1258-1"><a href="#cb1258-1" aria-hidden="true"></a><span class="ot">[</span></span>
+<span id="cb1258-2"><a href="#cb1258-2" aria-hidden="true"></a> <span class="fu">{</span></span>
+<span id="cb1258-3"><a href="#cb1258-3" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"1234567890ABC!123"</span><span class="fu">,</span></span>
+<span id="cb1258-4"><a href="#cb1258-4" aria-hidden="true"></a> <span class="dt">"grantedTo"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1258-5"><a href="#cb1258-5" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1258-6"><a href="#cb1258-6" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
+<span id="cb1258-7"><a href="#cb1258-7" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1258-8"><a href="#cb1258-8" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
+<span id="cb1258-9"><a href="#cb1258-9" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
+<span id="cb1258-10"><a href="#cb1258-10" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1258-11"><a href="#cb1258-11" aria-hidden="true"></a> <span class="dt">"invitation"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1258-12"><a href="#cb1258-12" aria-hidden="true"></a> <span class="dt">"email"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
+<span id="cb1258-13"><a href="#cb1258-13" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1258-14"><a href="#cb1258-14" aria-hidden="true"></a> <span class="dt">"link"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1258-15"><a href="#cb1258-15" aria-hidden="true"></a> <span class="dt">"webUrl"</span><span class="fu">:</span> <span class="st">"https://1drv.ms/t/s!1234567890ABC"</span></span>
+<span id="cb1258-16"><a href="#cb1258-16" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1258-17"><a href="#cb1258-17" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
+<span id="cb1258-18"><a href="#cb1258-18" aria-hidden="true"></a> <span class="st">"read"</span></span>
+<span id="cb1258-19"><a href="#cb1258-19" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
+<span id="cb1258-20"><a href="#cb1258-20" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"s!1234567890ABC"</span></span>
+<span id="cb1258-21"><a href="#cb1258-21" aria-hidden="true"></a> <span class="fu">}</span></span>
+<span id="cb1258-22"><a href="#cb1258-22" aria-hidden="true"></a><span class="ot">]</span></span></code></pre></div>
 <p>Example for OneDrive Business:</p>
-<div class="sourceCode" id="cb1261"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1261-1"><a href="#cb1261-1" aria-hidden="true"></a><span class="ot">[</span></span>
-<span id="cb1261-2"><a href="#cb1261-2" aria-hidden="true"></a> <span class="fu">{</span></span>
-<span id="cb1261-3"><a href="#cb1261-3" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"48d31887-5fad-4d73-a9f5-3c356e68a038"</span><span class="fu">,</span></span>
-<span id="cb1261-4"><a href="#cb1261-4" aria-hidden="true"></a> <span class="dt">"grantedToIdentities"</span><span class="fu">:</span> <span class="ot">[</span></span>
-<span id="cb1261-5"><a href="#cb1261-5" aria-hidden="true"></a> <span class="fu">{</span></span>
-<span id="cb1261-6"><a href="#cb1261-6" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1261-7"><a href="#cb1261-7" aria-hidden="true"></a> <span class="dt">"displayName"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
-<span id="cb1261-8"><a href="#cb1261-8" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1261-9"><a href="#cb1261-9" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
-<span id="cb1261-10"><a href="#cb1261-10" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
-<span id="cb1261-11"><a href="#cb1261-11" aria-hidden="true"></a> <span class="fu">}</span></span>
-<span id="cb1261-12"><a href="#cb1261-12" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
-<span id="cb1261-13"><a href="#cb1261-13" aria-hidden="true"></a> <span class="dt">"link"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1261-14"><a href="#cb1261-14" aria-hidden="true"></a> <span class="dt">"type"</span><span class="fu">:</span> <span class="st">"view"</span><span class="fu">,</span></span>
-<span id="cb1261-15"><a href="#cb1261-15" aria-hidden="true"></a> <span class="dt">"scope"</span><span class="fu">:</span> <span class="st">"users"</span><span class="fu">,</span></span>
-<span id="cb1261-16"><a href="#cb1261-16" aria-hidden="true"></a> <span class="dt">"webUrl"</span><span class="fu">:</span> <span class="st">"https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"</span></span>
-<span id="cb1261-17"><a href="#cb1261-17" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1261-18"><a href="#cb1261-18" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
-<span id="cb1261-19"><a href="#cb1261-19" aria-hidden="true"></a> <span class="st">"read"</span></span>
-<span id="cb1261-20"><a href="#cb1261-20" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
-<span id="cb1261-21"><a href="#cb1261-21" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"u!LKj1lkdlals90j1nlkascl"</span></span>
-<span id="cb1261-22"><a href="#cb1261-22" aria-hidden="true"></a> <span class="fu">}</span><span class="ot">,</span></span>
-<span id="cb1261-23"><a href="#cb1261-23" aria-hidden="true"></a> <span class="fu">{</span></span>
-<span id="cb1261-24"><a href="#cb1261-24" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"5D33DD65C6932946"</span><span class="fu">,</span></span>
-<span id="cb1261-25"><a href="#cb1261-25" aria-hidden="true"></a> <span class="dt">"grantedTo"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1261-26"><a href="#cb1261-26" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1261-27"><a href="#cb1261-27" aria-hidden="true"></a> <span class="dt">"displayName"</span><span class="fu">:</span> <span class="st">"John Doe"</span><span class="fu">,</span></span>
-<span id="cb1261-28"><a href="#cb1261-28" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"efee1b77-fb3b-4f65-99d6-274c11914d12"</span></span>
-<span id="cb1261-29"><a href="#cb1261-29" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1261-30"><a href="#cb1261-30" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
-<span id="cb1261-31"><a href="#cb1261-31" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
-<span id="cb1261-32"><a href="#cb1261-32" aria-hidden="true"></a> <span class="fu">},</span></span>
-<span id="cb1261-33"><a href="#cb1261-33" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
-<span id="cb1261-34"><a href="#cb1261-34" aria-hidden="true"></a> <span class="st">"owner"</span></span>
-<span id="cb1261-35"><a href="#cb1261-35" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
-<span id="cb1261-36"><a href="#cb1261-36" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"</span></span>
-<span id="cb1261-37"><a href="#cb1261-37" aria-hidden="true"></a> <span class="fu">}</span></span>
-<span id="cb1261-38"><a href="#cb1261-38" aria-hidden="true"></a><span class="ot">]</span></span></code></pre></div>
+<div class="sourceCode" id="cb1259"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1259-1"><a href="#cb1259-1" aria-hidden="true"></a><span class="ot">[</span></span>
+<span id="cb1259-2"><a href="#cb1259-2" aria-hidden="true"></a> <span class="fu">{</span></span>
+<span id="cb1259-3"><a href="#cb1259-3" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"48d31887-5fad-4d73-a9f5-3c356e68a038"</span><span class="fu">,</span></span>
+<span id="cb1259-4"><a href="#cb1259-4" aria-hidden="true"></a> <span class="dt">"grantedToIdentities"</span><span class="fu">:</span> <span class="ot">[</span></span>
+<span id="cb1259-5"><a href="#cb1259-5" aria-hidden="true"></a> <span class="fu">{</span></span>
+<span id="cb1259-6"><a href="#cb1259-6" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1259-7"><a href="#cb1259-7" aria-hidden="true"></a> <span class="dt">"displayName"</span><span class="fu">:</span> <span class="st">"ryan@contoso.com"</span></span>
+<span id="cb1259-8"><a href="#cb1259-8" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1259-9"><a href="#cb1259-9" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
+<span id="cb1259-10"><a href="#cb1259-10" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
+<span id="cb1259-11"><a href="#cb1259-11" aria-hidden="true"></a> <span class="fu">}</span></span>
+<span id="cb1259-12"><a href="#cb1259-12" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
+<span id="cb1259-13"><a href="#cb1259-13" aria-hidden="true"></a> <span class="dt">"link"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1259-14"><a href="#cb1259-14" aria-hidden="true"></a> <span class="dt">"type"</span><span class="fu">:</span> <span class="st">"view"</span><span class="fu">,</span></span>
+<span id="cb1259-15"><a href="#cb1259-15" aria-hidden="true"></a> <span class="dt">"scope"</span><span class="fu">:</span> <span class="st">"users"</span><span class="fu">,</span></span>
+<span id="cb1259-16"><a href="#cb1259-16" aria-hidden="true"></a> <span class="dt">"webUrl"</span><span class="fu">:</span> <span class="st">"https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"</span></span>
+<span id="cb1259-17"><a href="#cb1259-17" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1259-18"><a href="#cb1259-18" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
+<span id="cb1259-19"><a href="#cb1259-19" aria-hidden="true"></a> <span class="st">"read"</span></span>
+<span id="cb1259-20"><a href="#cb1259-20" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
+<span id="cb1259-21"><a href="#cb1259-21" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"u!LKj1lkdlals90j1nlkascl"</span></span>
+<span id="cb1259-22"><a href="#cb1259-22" aria-hidden="true"></a> <span class="fu">}</span><span class="ot">,</span></span>
+<span id="cb1259-23"><a href="#cb1259-23" aria-hidden="true"></a> <span class="fu">{</span></span>
+<span id="cb1259-24"><a href="#cb1259-24" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"5D33DD65C6932946"</span><span class="fu">,</span></span>
+<span id="cb1259-25"><a href="#cb1259-25" aria-hidden="true"></a> <span class="dt">"grantedTo"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1259-26"><a href="#cb1259-26" aria-hidden="true"></a> <span class="dt">"user"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1259-27"><a href="#cb1259-27" aria-hidden="true"></a> <span class="dt">"displayName"</span><span class="fu">:</span> <span class="st">"John Doe"</span><span class="fu">,</span></span>
+<span id="cb1259-28"><a href="#cb1259-28" aria-hidden="true"></a> <span class="dt">"id"</span><span class="fu">:</span> <span class="st">"efee1b77-fb3b-4f65-99d6-274c11914d12"</span></span>
+<span id="cb1259-29"><a href="#cb1259-29" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1259-30"><a href="#cb1259-30" aria-hidden="true"></a> <span class="dt">"application"</span><span class="fu">:</span> <span class="fu">{},</span></span>
+<span id="cb1259-31"><a href="#cb1259-31" aria-hidden="true"></a> <span class="dt">"device"</span><span class="fu">:</span> <span class="fu">{}</span></span>
+<span id="cb1259-32"><a href="#cb1259-32" aria-hidden="true"></a> <span class="fu">},</span></span>
+<span id="cb1259-33"><a href="#cb1259-33" aria-hidden="true"></a> <span class="dt">"roles"</span><span class="fu">:</span> <span class="ot">[</span></span>
+<span id="cb1259-34"><a href="#cb1259-34" aria-hidden="true"></a> <span class="st">"owner"</span></span>
+<span id="cb1259-35"><a href="#cb1259-35" aria-hidden="true"></a> <span class="ot">]</span><span class="fu">,</span></span>
+<span id="cb1259-36"><a href="#cb1259-36" aria-hidden="true"></a> <span class="dt">"shareId"</span><span class="fu">:</span> <span class="st">"FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"</span></span>
+<span id="cb1259-37"><a href="#cb1259-37" aria-hidden="true"></a> <span class="fu">}</span></span>
+<span id="cb1259-38"><a href="#cb1259-38" aria-hidden="true"></a><span class="ot">]</span></span></code></pre></div>
 <p>To write permissions, pass in a "permissions" metadata key using this same format. The <a href="https://rclone.org/docs/#metadata-mapper"><code>--metadata-mapper</code></a> tool can be very helpful for this.</p>
 <p>When adding permissions, an email address can be provided in the <code>User.ID</code> or <code>DisplayName</code> properties of <code>grantedTo</code> or <code>grantedToIdentities</code>. Alternatively, an ObjectID can be provided in <code>User.ID</code>. At least one valid recipient must be provided in order to add a permission for a user. Creating a Public Link is also supported, if <code>Link.Scope</code> is set to <code>"anonymous"</code>.</p>
 <p>Example request to add a "read" permission with <code>--metadata-mapper</code>:</p>
-<div class="sourceCode" id="cb1262"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1262-1"><a href="#cb1262-1" aria-hidden="true"></a><span class="fu">{</span></span>
-<span id="cb1262-2"><a href="#cb1262-2" aria-hidden="true"></a> <span class="dt">"Metadata"</span><span class="fu">:</span> <span class="fu">{</span></span>
-<span id="cb1262-3"><a href="#cb1262-3" aria-hidden="true"></a> <span class="dt">"permissions"</span><span class="fu">:</span> <span class="st">"[{</span><span class="ch">\"</span><span class="st">grantedToIdentities</span><span class="ch">\"</span><span class="st">:[{</span><span class="ch">\"</span><span class="st">user</span><span class="ch">\"</span><span class="st">:{</span><span class="ch">\"</span><span class="st">id</span><span class="ch">\"</span><span class="st">:</span><span class="ch">\"</span><span class="st">ryan@contoso.com</span><span class="ch">\"</span><span class="st">}}],</span><span class="ch">\"</span><span class="st">roles</span><span class="ch">\"</span><span class="st">:[</span><span class="ch">\"</span><span class="st">read</span><span class="ch">\"</span><span class="st">]}]"</span></span>
-<span id="cb1262-4"><a href="#cb1262-4" aria-hidden="true"></a> <span class="fu">}</span></span>
-<span id="cb1262-5"><a href="#cb1262-5" aria-hidden="true"></a><span class="fu">}</span></span></code></pre></div>
+<div class="sourceCode" id="cb1260"><pre class="sourceCode json"><code class="sourceCode json"><span id="cb1260-1"><a href="#cb1260-1" aria-hidden="true"></a><span class="fu">{</span></span>
+<span id="cb1260-2"><a href="#cb1260-2" aria-hidden="true"></a> <span class="dt">"Metadata"</span><span class="fu">:</span> <span class="fu">{</span></span>
+<span id="cb1260-3"><a href="#cb1260-3" aria-hidden="true"></a> <span class="dt">"permissions"</span><span class="fu">:</span> <span class="st">"[{</span><span class="ch">\"</span><span class="st">grantedToIdentities</span><span class="ch">\"</span><span class="st">:[{</span><span class="ch">\"</span><span class="st">user</span><span class="ch">\"</span><span class="st">:{</span><span class="ch">\"</span><span class="st">id</span><span class="ch">\"</span><span class="st">:</span><span class="ch">\"</span><span class="st">ryan@contoso.com</span><span class="ch">\"</span><span class="st">}}],</span><span class="ch">\"</span><span class="st">roles</span><span class="ch">\"</span><span class="st">:[</span><span class="ch">\"</span><span class="st">read</span><span class="ch">\"</span><span class="st">]}]"</span></span>
+<span id="cb1260-4"><a href="#cb1260-4" aria-hidden="true"></a> <span class="fu">}</span></span>
+<span id="cb1260-5"><a href="#cb1260-5" aria-hidden="true"></a><span class="fu">}</span></span></code></pre></div>
 <p>Note that adding a permission can fail if a conflicting permission already exists for the file/folder.</p>
 <p>To update an existing permission, include both the Permission ID and the new <code>roles</code> to be assigned. <code>roles</code> is the only property that can be changed.</p>
 <p>To remove permissions, pass in a blob containing only the permissions you wish to keep (which can be empty, to remove all.) Note that the <code>owner</code> role will be ignored, as it cannot be removed.</p>
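The hunk above only renumbers the example code blocks (cb1260 to cb1258, and so on); the `--metadata-mapper` workflow the surrounding text describes can be sketched as follows, where the mapper path is a placeholder for a program that emits the "permissions" JSON shown above:

    # Copy with metadata, letting an external mapper program inject
    # the "permissions" metadata key in the documented format
    rclone copy --metadata --metadata-mapper /path/to/add-read-perm src: onedrive:dst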
@@ -32247,24 +32200,54 @@ y/e/d> y</code></pre>
 </ul>
 <h3 id="advanced-options-42">Advanced options</h3>
 <p>Here are the Advanced options specific to pikpak (PikPak).</p>
-<h4 id="pikpak-device-id">--pikpak-device-id</h4>
-<p>Device ID used for authorization.</p>
+<h4 id="pikpak-client-id">--pikpak-client-id</h4>
+<p>OAuth Client Id.</p>
+<p>Leave blank normally.</p>
 <p>Properties:</p>
 <ul>
-<li>Config: device_id</li>
-<li>Env Var: RCLONE_PIKPAK_DEVICE_ID</li>
+<li>Config: client_id</li>
+<li>Env Var: RCLONE_PIKPAK_CLIENT_ID</li>
 <li>Type: string</li>
 <li>Required: false</li>
 </ul>
-<h4 id="pikpak-user-agent">--pikpak-user-agent</h4>
-<p>HTTP user agent for pikpak.</p>
-<p>Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on command line.</p>
+<h4 id="pikpak-client-secret">--pikpak-client-secret</h4>
+<p>OAuth Client Secret.</p>
+<p>Leave blank normally.</p>
 <p>Properties:</p>
 <ul>
-<li>Config: user_agent</li>
-<li>Env Var: RCLONE_PIKPAK_USER_AGENT</li>
+<li>Config: client_secret</li>
+<li>Env Var: RCLONE_PIKPAK_CLIENT_SECRET</li>
 <li>Type: string</li>
-<li>Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"</li>
+<li>Required: false</li>
 </ul>
+<h4 id="pikpak-token">--pikpak-token</h4>
+<p>OAuth Access Token as a JSON blob.</p>
+<p>Properties:</p>
+<ul>
+<li>Config: token</li>
+<li>Env Var: RCLONE_PIKPAK_TOKEN</li>
+<li>Type: string</li>
+<li>Required: false</li>
+</ul>
+<h4 id="pikpak-auth-url">--pikpak-auth-url</h4>
+<p>Auth server URL.</p>
+<p>Leave blank to use the provider defaults.</p>
+<p>Properties:</p>
+<ul>
+<li>Config: auth_url</li>
+<li>Env Var: RCLONE_PIKPAK_AUTH_URL</li>
+<li>Type: string</li>
+<li>Required: false</li>
+</ul>
+<h4 id="pikpak-token-url">--pikpak-token-url</h4>
+<p>Token server url.</p>
+<p>Leave blank to use the provider defaults.</p>
+<p>Properties:</p>
+<ul>
+<li>Config: token_url</li>
+<li>Env Var: RCLONE_PIKPAK_TOKEN_URL</li>
+<li>Type: string</li>
+<li>Required: false</li>
+</ul>
 <h4 id="pikpak-root-folder-id">--pikpak-root-folder-id</h4>
 <p>ID of the root folder. Leave blank normally.</p>
@@ -36966,79 +36949,6 @@ $ tree /tmp/c
 <li>"error": return an error based on option value</li>
 </ul>
 <h1 id="changelog-1">Changelog</h1>
-<h2 id="v1.68.2---2024-11-15">v1.68.2 - 2024-11-15</h2>
-<p><a href="https://github.com/rclone/rclone/compare/v1.68.1...v1.68.2">See commits</a></p>
-<ul>
-<li>Security fixes
-<ul>
-<li>local backend: CVE-2024-52522: fix permission and ownership on symlinks with <code>--links</code> and <code>--metadata</code> (Nick Craig-Wood)
-<ul>
-<li>Only affects users using <code>--metadata</code> and <code>--links</code> and copying files to the local backend</li>
-<li>See https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv</li>
-</ul></li>
-<li>build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (dependabot)
-<ul>
-<li>This is an issue in a dependency which is used for JWT certificates</li>
-<li>See https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r</li>
-</ul></li>
-</ul></li>
-<li>Bug Fixes
-<ul>
-<li>accounting: Fix wrong message on SIGUSR2 to enable/disable bwlimit (Nick Craig-Wood)</li>
-<li>bisync: Fix output capture restoring the wrong output for logrus (Dimitrios Slamaris)</li>
-<li>dlna: Fix loggingResponseWriter disregarding log level (Simon Bos)</li>
-<li>serve s3: Fix excess locking which was making serve s3 single threaded (Nick Craig-Wood)</li>
-<li>doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy Bush)</li>
-</ul></li>
-<li>Local
-<ul>
-<li>Fix permission and ownership on symlinks with <code>--links</code> and <code>--metadata</code> (Nick Craig-Wood)</li>
-<li>Fix <code>--copy-links</code> on macOS when cloning (nielash)</li>
-</ul></li>
-<li>Onedrive
-<ul>
-<li>Fix Retry-After handling to look at 503 errors also (Nick Craig-Wood)</li>
-</ul></li>
-<li>Pikpak
-<ul>
-<li>Fix cid/gcid calculations for fs.OverrideRemote (wiserain)</li>
-<li>Fix fatal crash on startup with token that can't be refreshed (Nick Craig-Wood)</li>
-</ul></li>
-<li>S3
-<ul>
-<li>Fix crash when using <code>--s3-download-url</code> after migration to SDKv2 (Nick Craig-Wood)</li>
-<li>Storj provider: fix server-side copy of files bigger than 5GB (Kaloyan Raev)</li>
-<li>Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)</li>
-</ul></li>
-</ul>
-<h2 id="v1.68.1---2024-09-24">v1.68.1 - 2024-09-24</h2>
-<p><a href="https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1">See commits</a></p>
-<ul>
-<li>Bug Fixes
-<ul>
-<li>build: Fix docker release build (ttionya)</li>
-<li>doc fixes (Nick Craig-Wood, Pawel Palucha)</li>
-<li>fs
-<ul>
-<li>Fix <code>--dump filters</code> not always appearing (Nick Craig-Wood)</li>
-<li>Fix setting <code>stringArray</code> config values from environment variables (Nick Craig-Wood)</li>
-</ul></li>
-<li>rc: Fix default value of <code>--metrics-addr</code> (Nick Craig-Wood)</li>
-<li>serve docker: Add missing <code>vfs-read-chunk-streams</code> option in docker volume driver (Divyam)</li>
-</ul></li>
-<li>Onedrive
-<ul>
-<li>Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)</li>
-</ul></li>
-<li>Pikpak
-<ul>
-<li>Fix login issue where token retrieval fails (wiserain)</li>
-</ul></li>
-<li>S3
-<ul>
-<li>Fix rclone ignoring static credentials when <code>env_auth=true</code> (Nick Craig-Wood)</li>
-</ul></li>
-</ul>
 <h2 id="v1.68.0---2024-09-08">v1.68.0 - 2024-09-08</h2>
 <p><a href="https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0">See commits</a></p>
 <ul>
|
|
206
MANUAL.md
generated
|
@ -1,6 +1,6 @@
|
|||
% rclone(1) User Manual
|
||||
% Nick Craig-Wood
|
||||
% Nov 15, 2024
|
||||
% Sep 08, 2024
|
||||
|
||||
# Rclone syncs your files to cloud storage
|
||||
|
||||
|
@ -5259,9 +5259,7 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -5605,9 +5603,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
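As a minimal sketch of the advice above (the unit fragment, remote name
and paths are illustrative):

    # systemd service fragment
    [Service]
    ExecStart=/usr/bin/rclone mount myremote: /mnt/data \
        --config=/home/user/.config/rclone/rclone.conf \
        --cache-dir=/var/cache/rclone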
|
||||
|
||||
## Rclone as Unix mount helper
|
||||
|
||||
|
@ -6474,9 +6472,7 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -6820,9 +6816,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
|
||||
|
||||
## Rclone as Unix mount helper
|
||||
|
||||
|
@ -7738,7 +7734,7 @@ Flags to control the Remote Control API
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -16258,22 +16254,6 @@ so they take exactly the same form.
|
|||
|
||||
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
|
||||
|
||||
Options that can appear multiple times (type `stringArray`) are
|
||||
treated slightly differently as environment variables can only be
|
||||
defined once. In order to allow a simple mechanism for adding one or
|
||||
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
|
||||
string. For example
|
||||
|
||||
| Environment Variable | Equivalent options |
|
||||
|----------------------|--------------------|
|
||||
| `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
|
||||
| `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
|
||||
| `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
|
||||
| `RCLONE_EXCLUDE='"/directory with comma , in it /**"'` | `--exclude "/directory with comma , in it /**"` |
|
||||
|
||||
If `stringArray` options are defined as environment variables **and**
|
||||
options on the command line then all the values will be used.
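A small shell sketch of the CSV behaviour from the table above (the
remote names are illustrative):

    # one environment variable carrying two filter rules
    export RCLONE_EXCLUDE='"*.jpg","*.png"'
    # behaves like: rclone sync src: dst: --exclude "*.jpg" --exclude "*.png"
    rclone sync src: dst: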
|
||||
|
||||
### Config file ###
|
||||
|
||||
You can set defaults for values in the config file on an individual
|
||||
|
@ -16970,8 +16950,6 @@ processed in.
|
|||
Arrange the order of filter rules with the most restrictive first and
|
||||
work down.
|
||||
|
||||
Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
|
||||
|
||||
E.g. for `filter-file.txt`:
|
||||
|
||||
# a sample filter rule file
|
||||
|
@ -16979,7 +16957,6 @@ E.g. for `filter-file.txt`:
|
|||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
|
@ -19790,7 +19767,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
|
||||
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
|
||||
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
|
||||
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
|
||||
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
|
||||
| PikPak | MD5 | R | No | No | R | - |
|
||||
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
|
||||
| premiumize.me | - | - | Yes | No | R | - |
|
||||
|
@ -20497,7 +20474,7 @@ Flags for general networking and HTTP stuff.
|
|||
--tpslimit float Limit HTTP transactions per second to this
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
|
||||
--use-cookies Enable session cookiejar
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")
|
||||
```
|
||||
|
||||
|
||||
|
@ -20646,7 +20623,7 @@ Flags to control the Remote Control API.
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -20682,7 +20659,7 @@ Flags to control the Remote Control API.
|
|||
Flags to control the Metrics HTTP endpoint.
|
||||
|
||||
```
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
|
||||
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--metrics-baseurl string Prefix for URLs - leave blank for root
|
||||
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -21176,18 +21153,21 @@ Backend-only flags (these can be set in the config file also).
|
|||
--pcloud-token string OAuth Access Token as a JSON blob
|
||||
--pcloud-token-url string Token server url
|
||||
--pcloud-username string Your pcloud username
|
||||
--pikpak-auth-url string Auth server URL
|
||||
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
|
||||
--pikpak-client-id string OAuth Client Id
|
||||
--pikpak-client-secret string OAuth Client Secret
|
||||
--pikpak-description string Description of the remote
|
||||
--pikpak-device-id string Device ID used for authorization
|
||||
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
|
||||
--pikpak-pass string Pikpak password (obscured)
|
||||
--pikpak-root-folder-id string ID of the root folder
|
||||
--pikpak-token string OAuth Access Token as a JSON blob
|
||||
--pikpak-token-url string Token server url
|
||||
--pikpak-trashed-only Only show files that are in the trash
|
||||
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
|
||||
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--pikpak-user string Pikpak username
|
||||
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
|
||||
--pixeldrain-api-key string API key for your pixeldrain account
|
||||
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
|
||||
--pixeldrain-description string Description of the remote
|
||||
|
@ -24761,38 +24741,6 @@ there for more details.
|
|||
|
||||
Setting this flag increases the chance for undetected upload failures.
|
||||
|
||||
### Increasing performance
|
||||
|
||||
#### Using server-side copy
|
||||
|
||||
If you are copying objects between S3 buckets in the same region, you should
|
||||
use server-side copy.
|
||||
This is much faster than downloading and re-uploading the objects, as no data is transferred.
|
||||
|
||||
For rclone to use server-side copy, you must use the same remote for the source and destination.
|
||||
|
||||
rclone copy s3:source-bucket s3:destination-bucket
|
||||
|
||||
When using server-side copy, the performance is limited by the rate at which rclone issues
|
||||
API requests to S3.
|
||||
See below for how to increase the number of API requests rclone makes.
|
||||
|
||||
#### Increasing the rate of API requests
|
||||
|
||||
You can increase the rate of API requests to S3 by increasing the parallelism using `--transfers` and `--checkers`
|
||||
options.
|
||||
|
||||
Rclone uses very conservative defaults for these settings, as not all providers support high rates of requests.
|
||||
Depending on your provider, you can significantly increase the number of transfers and checkers.
|
||||
|
||||
For example, with AWS S3, you can increase the number of checkers to values like 200.
|
||||
If you are doing a server-side copy, you can also increase the number of transfers to 200.
|
||||
|
||||
rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
|
||||
|
||||
You will need to experiment with these values to find the optimal settings for your setup.
|
||||
|
||||
|
||||
### Versions
|
||||
|
||||
When bucket versioning is enabled (this can be done with rclone with
|
||||
|
@ -27836,8 +27784,8 @@ chunk_size = 5M
|
|||
copy_cutoff = 5M
|
||||
```
|
||||
|
||||
[Scaleway Glacier](https://www.scaleway.com/en/glacier-cold-storage/) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
|
||||
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to Scaleway Glacier. Note that in this state you can't read files back; you will need to restore them to the "STANDARD" storage_class first (see the "restore" section above).
|
||||
[C14 Cold Storage](https://www.online.net/en/storage/c14-cold-storage) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
|
||||
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to C14. Note that in this state you can't read files back; you will need to restore them to the "STANDARD" storage_class first (see the "restore" section above).
|
||||
|
||||
### Seagate Lyve Cloud {#lyve}
|
||||
|
||||
|
@ -37901,9 +37849,9 @@ then select "OAuth client ID".
|
|||
|
||||
9. It will show you a client ID and client secret. Make a note of these.
|
||||
|
||||
(If you selected "External" at Step 5 continue to Step 10.
|
||||
(If you selected "External" at Step 5 continue to Step 9.
|
||||
If you chose "Internal" you don't need to publish and can skip straight to
|
||||
Step 11 but your destination drive must be part of the same Google Workspace.)
|
||||
Step 10 but your destination drive must be part of the same Google Workspace.)
|
||||
|
||||
10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
|
||||
You will also want to add yourself as a test user.
|
||||
|
@ -48090,29 +48038,68 @@ Properties:
|
|||
|
||||
Here are the Advanced options specific to pikpak (PikPak).
|
||||
|
||||
#### --pikpak-device-id
|
||||
#### --pikpak-client-id
|
||||
|
||||
Device ID used for authorization.
|
||||
OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: device_id
|
||||
- Env Var: RCLONE_PIKPAK_DEVICE_ID
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_ID
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-user-agent
|
||||
#### --pikpak-client-secret
|
||||
|
||||
HTTP user agent for pikpak.
|
||||
OAuth Client Secret.
|
||||
|
||||
Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on command line.
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: user_agent
|
||||
- Env Var: RCLONE_PIKPAK_USER_AGENT
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-auth-url
|
||||
|
||||
Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_PIKPAK_AUTH_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-token-url
|
||||
|
||||
Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-root-folder-id
|
||||
|
||||
|
@ -54615,55 +54602,6 @@ Options:
|
|||
|
||||
# Changelog
|
||||
|
||||
## v1.68.2 - 2024-11-15
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.1...v1.68.2)
|
||||
|
||||
* Security fixes
|
||||
* local backend: CVE-2024-52522: fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Only affects users using `--metadata` and `--links` and copying files to the local backend
|
||||
* See https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
* build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (dependabot)
|
||||
* This is an issue in a dependency which is used for JWT certificates
|
||||
* See https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r
|
||||
* Bug Fixes
|
||||
* accounting: Fix wrong message on SIGUSR2 to enable/disable bwlimit (Nick Craig-Wood)
|
||||
* bisync: Fix output capture restoring the wrong output for logrus (Dimitrios Slamaris)
|
||||
* dlna: Fix loggingResponseWriter disregarding log level (Simon Bos)
|
||||
* serve s3: Fix excess locking which was making serve s3 single threaded (Nick Craig-Wood)
|
||||
* doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy Bush)
|
||||
* Local
|
||||
* Fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Fix `--copy-links` on macOS when cloning (nielash)
|
||||
* Onedrive
|
||||
* Fix Retry-After handling to look at 503 errors also (Nick Craig-Wood)
|
||||
* Pikpak
|
||||
* Fix cid/gcid calculations for fs.OverrideRemote (wiserain)
|
||||
* Fix fatal crash on startup with token that can't be refreshed (Nick Craig-Wood)
|
||||
* S3
|
||||
* Fix crash when using `--s3-download-url` after migration to SDKv2 (Nick Craig-Wood)
|
||||
* Storj provider: fix server-side copy of files bigger than 5GB (Kaloyan Raev)
|
||||
* Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)
|
||||
|
||||
## v1.68.1 - 2024-09-24
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
|
||||
|
||||
* Bug Fixes
|
||||
* build: Fix docker release build (ttionya)
|
||||
* doc fixes (Nick Craig-Wood, Pawel Palucha)
|
||||
* fs
|
||||
* Fix `--dump filters` not always appearing (Nick Craig-Wood)
|
||||
* Fix setting `stringArray` config values from environment variables (Nick Craig-Wood)
|
||||
* rc: Fix default value of `--metrics-addr` (Nick Craig-Wood)
|
||||
* serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam)
|
||||
* Onedrive
|
||||
* Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)
|
||||
* Pikpak
|
||||
* Fix login issue where token retrieval fails (wiserain)
|
||||
* S3
|
||||
* Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood)
|
||||
|
||||
## v1.68.0 - 2024-09-08
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0)
|
||||
|
|
245
MANUAL.txt
generated
|
@ -1,6 +1,6 @@
|
|||
rclone(1) User Manual
|
||||
Nick Craig-Wood
|
||||
Nov 15, 2024
|
||||
Sep 08, 2024
|
||||
|
||||
Rclone syncs your files to cloud storage
|
||||
|
||||
|
@ -4843,9 +4843,7 @@ manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -5190,9 +5188,8 @@ Note that systemd runs mount units without any environment variables
|
|||
including PATH or HOME. This means that tilde (~) expansion will not
|
||||
work and you should provide --config and --cache-dir explicitly as
|
||||
absolute paths via rclone arguments. Since mounting requires the
|
||||
fusermount or fusermount3 program, rclone will use the fallback PATH of
|
||||
/bin:/usr/bin in this scenario. Please ensure that
|
||||
fusermount/fusermount3 is present on this PATH.
|
||||
fusermount program, rclone will use the fallback PATH of /bin:/usr/bin
|
||||
in this scenario. Please ensure that fusermount is present on this PATH.
|
||||
|
||||
Rclone as Unix mount helper
|
||||
|
||||
|
@ -6030,9 +6027,7 @@ manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
# OS X
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -6377,9 +6372,8 @@ Note that systemd runs mount units without any environment variables
|
|||
including PATH or HOME. This means that tilde (~) expansion will not
|
||||
work and you should provide --config and --cache-dir explicitly as
|
||||
absolute paths via rclone arguments. Since mounting requires the
|
||||
fusermount or fusermount3 program, rclone will use the fallback PATH of
|
||||
/bin:/usr/bin in this scenario. Please ensure that
|
||||
fusermount/fusermount3 is present on this PATH.
|
||||
fusermount program, rclone will use the fallback PATH of /bin:/usr/bin
|
||||
in this scenario. Please ensure that fusermount is present on this PATH.
|
||||
|
||||
Rclone as Unix mount helper
|
||||
|
||||
|
@ -7304,7 +7298,7 @@ RC Options
|
|||
Flags to control the Remote Control API
|
||||
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -15710,29 +15704,6 @@ they take exactly the same form.
|
|||
The options set by environment variables can be seen with the -vv flag,
|
||||
e.g. rclone version -vv.
|
||||
|
||||
Options that can appear multiple times (type stringArray) are treated
|
||||
slightly differently as environment variables can only be defined once.
|
||||
In order to allow a simple mechanism for adding one or many items, the
|
||||
input is treated as a CSV encoded string. For example
|
||||
|
||||
----------------------------------------------------------------------------------------
|
||||
Environment Variable Equivalent options
|
||||
------------------------------------------------------ ---------------------------------
|
||||
RCLONE_EXCLUDE="*.jpg" --exclude "*.jpg"
|
||||
|
||||
RCLONE_EXCLUDE="*.jpg,*.png" --exclude "*.jpg"
|
||||
--exclude "*.png"
|
||||
|
||||
RCLONE_EXCLUDE='"*.jpg","*.png"' --exclude "*.jpg"
|
||||
--exclude "*.png"
|
||||
|
||||
RCLONE_EXCLUDE='"/directory with comma , in it /**"'   --exclude "/directory with comma
|
||||
, in it /**"
|
||||
----------------------------------------------------------------------------------------
|
||||
|
||||
If stringArray options are defined as environment variables and options
|
||||
on the command line then all the values will be used.
|
||||
|
||||
Config file
|
||||
|
||||
You can set defaults for values in the config file on an individual
|
||||
|
@ -16428,10 +16399,6 @@ processed in.
|
|||
Arrange the order of filter rules with the most restrictive first and
|
||||
work down.
|
||||
|
||||
Lines starting with # or ; are ignored, and can be used to write
|
||||
comments. Inline comments are not supported. Use -vv --dump filters to
|
||||
see how they appear in the final regexp.
|
||||
|
||||
E.g. for filter-file.txt:
|
||||
|
||||
# a sample filter rule file
|
||||
|
@ -16439,7 +16406,6 @@ E.g. for filter-file.txt:
|
|||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
|
@ -19274,7 +19240,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||
OpenDrive MD5 R/W Yes Partial ⁸ - -
|
||||
OpenStack Swift MD5 R/W No No R/W -
|
||||
Oracle Object Storage MD5 R/W No No R/W -
|
||||
pCloud MD5, SHA1 ⁷ R/W No No W -
|
||||
pCloud MD5, SHA1 ⁷ R No No W -
|
||||
PikPak MD5 R No No R -
|
||||
Pixeldrain SHA256 R/W No No R RW
|
||||
premiumize.me - - Yes No R -
|
||||
|
@ -20081,7 +20047,7 @@ Flags for general networking and HTTP stuff.
|
|||
--tpslimit float Limit HTTP transactions per second to this
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
|
||||
--use-cookies Enable session cookiejar
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")
|
||||
|
||||
Performance
|
||||
|
||||
|
@ -20206,7 +20172,7 @@ RC
|
|||
Flags to control the Remote Control API.
|
||||
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -20239,7 +20205,7 @@ Metrics
|
|||
|
||||
Flags to control the Metrics HTTP endpoint.
|
||||
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
|
||||
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--metrics-baseurl string Prefix for URLs - leave blank for root
|
||||
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -20730,18 +20696,21 @@ Backend-only flags (these can be set in the config file also).
|
|||
--pcloud-token string OAuth Access Token as a JSON blob
|
||||
--pcloud-token-url string Token server url
|
||||
--pcloud-username string Your pcloud username
|
||||
--pikpak-auth-url string Auth server URL
|
||||
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
|
||||
--pikpak-client-id string OAuth Client Id
|
||||
--pikpak-client-secret string OAuth Client Secret
|
||||
--pikpak-description string Description of the remote
|
||||
--pikpak-device-id string Device ID used for authorization
|
||||
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
|
||||
--pikpak-pass string Pikpak password (obscured)
|
||||
--pikpak-root-folder-id string ID of the root folder
|
||||
--pikpak-token string OAuth Access Token as a JSON blob
|
||||
--pikpak-token-url string Token server url
|
||||
--pikpak-trashed-only Only show files that are in the trash
|
||||
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
|
||||
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--pikpak-user string Pikpak username
|
||||
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
|
||||
--pixeldrain-api-key string API key for your pixeldrain account
|
||||
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
|
||||
--pixeldrain-description string Description of the remote
|
||||
|
@ -24306,41 +24275,6 @@ details.
|
|||
|
||||
Setting this flag increases the chance for undetected upload failures.
|
||||
|
||||
Increasing performance
|
||||
|
||||
Using server-side copy
|
||||
|
||||
If you are copying objects between S3 buckets in the same region, you
|
||||
should use server-side copy. This is much faster than downloading and
|
||||
re-uploading the objects, as no data is transferred.
|
||||
|
||||
For rclone to use server-side copy, you must use the same remote for the
|
||||
source and destination.
|
||||
|
||||
rclone copy s3:source-bucket s3:destination-bucket
|
||||
|
||||
When using server-side copy, the performance is limited by the rate at
|
||||
which rclone issues API requests to S3. See below for how to increase
|
||||
the number of API requests rclone makes.
|
||||
|
||||
Increasing the rate of API requests
|
||||
|
||||
You can increase the rate of API requests to S3 by increasing the
|
||||
parallelism using --transfers and --checkers options.
|
||||
|
||||
Rclone uses very conservative defaults for these settings, as not all
|
||||
providers support high rates of requests. Depending on your provider,
|
||||
you can significantly increase the number of transfers and checkers.
|
||||
|
||||
For example, with AWS S3, you can increase the number of checkers to
|
||||
values like 200. If you are doing a server-side copy, you can also
|
||||
increase the number of transfers to 200.
|
||||
|
||||
rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
|
||||
|
||||
You will need to experiment with these values to find the optimal
|
||||
settings for your setup.
|
||||
|
||||
Versions
|
||||
|
||||
When bucket versioning is enabled (this can be done with rclone with the
|
||||
|
@ -27369,13 +27303,13 @@ rclone like this:
|
|||
chunk_size = 5M
|
||||
copy_cutoff = 5M
|
||||
|
||||
Scaleway Glacier is the low-cost S3 Glacier alternative from Scaleway
|
||||
C14 Cold Storage is the low-cost S3 Glacier alternative from Scaleway
|
||||
and it works the same way as on S3 by accepting the "GLACIER"
|
||||
storage_class. So you can configure your remote with the
|
||||
storage_class = GLACIER option to upload directly to Scaleway Glacier.
|
||||
Note that in this state you can't read files back; you will need to
|
||||
restore them to the "STANDARD" storage_class first (see the "restore"
|
||||
section above).
|
||||
storage_class = GLACIER option to upload directly to C14. Note that in
|
||||
this state you can't read files back; you will need to restore them to
|
||||
the "STANDARD" storage_class first (see the "restore" section above).
|
||||
|
||||
Seagate Lyve Cloud
|
||||
|
||||
|
@ -37329,9 +37263,9 @@ Here is how to create your own Google Drive client ID for rclone:
|
|||
9. It will show you a client ID and client secret. Make a note of
|
||||
these.
|
||||
|
||||
(If you selected "External" at Step 5 continue to Step 10. If you
|
||||
(If you selected "External" at Step 5 continue to Step 9. If you
|
||||
chose "Internal" you don't need to publish and can skip straight to
|
||||
Step 11 but your destination drive must be part of the same Google
|
||||
Step 10 but your destination drive must be part of the same Google
|
||||
Workspace.)
|
||||
|
||||
10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and
|
||||
|
@ -47761,32 +47695,68 @@ Advanced options
|
|||
|
||||
Here are the Advanced options specific to pikpak (PikPak).
|
||||
|
||||
--pikpak-device-id
|
||||
--pikpak-client-id
|
||||
|
||||
Device ID used for authorization.
|
||||
OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: device_id
|
||||
- Env Var: RCLONE_PIKPAK_DEVICE_ID
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_ID
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
--pikpak-user-agent
|
||||
--pikpak-client-secret
|
||||
|
||||
HTTP user agent for pikpak.
|
||||
OAuth Client Secret.
|
||||
|
||||
Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0)
|
||||
Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on
|
||||
command line.
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: user_agent
|
||||
- Env Var: RCLONE_PIKPAK_USER_AGENT
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0)
|
||||
Gecko/20100101 Firefox/129.0"
|
||||
- Required: false
|
||||
|
||||
--pikpak-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
--pikpak-auth-url
|
||||
|
||||
Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_PIKPAK_AUTH_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
--pikpak-token-url
|
||||
|
||||
Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
--pikpak-root-folder-id
|
||||
|
||||
|
@ -54295,75 +54265,6 @@ Options:
|
|||
|
||||
Changelog
|
||||
|
||||
v1.68.2 - 2024-11-15
|
||||
|
||||
See commits
|
||||
|
||||
- Security fixes
|
||||
- local backend: CVE-2024-52522: fix permission and ownership on
|
||||
symlinks with --links and --metadata (Nick Craig-Wood)
|
||||
- Only affects users using --metadata and --links and copying
|
||||
files to the local backend
|
||||
- See
|
||||
https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
- build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1
|
||||
(dependabot)
|
||||
- This is an issue in a dependency which is used for JWT
|
||||
certificates
|
||||
- See
|
||||
https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r
|
||||
- Bug Fixes
|
||||
- accounting: Fix wrong message on SIGUSR2 to enable/disable
|
||||
bwlimit (Nick Craig-Wood)
|
||||
- bisync: Fix output capture restoring the wrong output for logrus
|
||||
(Dimitrios Slamaris)
|
||||
- dlna: Fix loggingResponseWriter disregarding log level (Simon
|
||||
Bos)
|
||||
- serve s3: Fix excess locking which was making serve s3 single
|
||||
threaded (Nick Craig-Wood)
|
||||
- doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy
|
||||
Bush)
|
||||
- Local
|
||||
- Fix permission and ownership on symlinks with --links and
|
||||
--metadata (Nick Craig-Wood)
|
||||
- Fix --copy-links on macOS when cloning (nielash)
|
||||
- Onedrive
|
||||
- Fix Retry-After handling to look at 503 errors also (Nick
|
||||
Craig-Wood)
|
||||
- Pikpak
|
||||
- Fix cid/gcid calculations for fs.OverrideRemote (wiserain)
|
||||
- Fix fatal crash on startup with token that can't be refreshed
|
||||
(Nick Craig-Wood)
|
||||
- S3
|
||||
- Fix crash when using --s3-download-url after migration to SDKv2
|
||||
(Nick Craig-Wood)
|
||||
- Storj provider: fix server-side copy of files bigger than 5GB
|
||||
(Kaloyan Raev)
|
||||
- Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)
|
||||
|
||||
v1.68.1 - 2024-09-24
|
||||
|
||||
See commits
|
||||
|
||||
- Bug Fixes
|
||||
- build: Fix docker release build (ttionya)
|
||||
- doc fixes (Nick Craig-Wood, Pawel Palucha)
|
||||
- fs
|
||||
- Fix --dump filters not always appearing (Nick Craig-Wood)
|
||||
- Fix setting stringArray config values from environment
|
||||
variables (Nick Craig-Wood)
|
||||
- rc: Fix default value of --metrics-addr (Nick Craig-Wood)
|
||||
- serve docker: Add missing vfs-read-chunk-streams option in
|
||||
docker volume driver (Divyam)
|
||||
- Onedrive
|
||||
- Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick
|
||||
Craig-Wood)
|
||||
- Pikpak
|
||||
- Fix login issue where token retrieval fails (wiserain)
|
||||
- S3
|
||||
- Fix rclone ignoring static credentials when env_auth=true (Nick
|
||||
Craig-Wood)
|
||||
|
||||
v1.68.0 - 2024-09-08
|
||||
|
||||
See commits
|
||||
|
|
6
Makefile
|
@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
|
|||
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
|
||||
|
||||
commanddocs: rclone
|
||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
|
||||
-@rmdir -p '$$HOME/.config/rclone'
|
||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
|
||||
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
||||
|
||||
backenddocs: rclone bin/make_backend_docs.py
|
||||
-@rmdir -p '$$HOME/.config/rclone'
|
||||
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
|
||||
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
|
||||
|
||||
rcdocs: rclone
|
||||
bin/make_rc_docs.sh
|
||||
|
|
|
@ -66,6 +66,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
|
@ -92,6 +93,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
|
||||
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
|
||||
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
|
||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
|
@ -109,6 +111,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
|||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
|
|
2
VERSION
|
@ -1 +1 @@
|
|||
v1.68.2
|
||||
v1.69.0
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
_ "github.com/rclone/rclone/backend/hdfs"
|
||||
_ "github.com/rclone/rclone/backend/hidrive"
|
||||
_ "github.com/rclone/rclone/backend/http"
|
||||
_ "github.com/rclone/rclone/backend/iclouddrive"
|
||||
_ "github.com/rclone/rclone/backend/imagekit"
|
||||
_ "github.com/rclone/rclone/backend/internetarchive"
|
||||
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||
|
|
|
@ -209,6 +209,22 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
|
|||
keys instead of setting ` + "`service_principal_file`" + `.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_instance_discovery",
|
||||
Help: `Skip requesting Microsoft Entra instance metadata
|
||||
|
||||
This should be set true only by applications authenticating in
|
||||
disconnected clouds, or private clouds such as Azure Stack.
|
||||
|
||||
It determines whether rclone requests Microsoft Entra instance
|
||||
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||
authenticating.
|
||||
|
||||
Setting this to true will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_msi",
|
||||
Help: `Use a managed service identity to authenticate (only works in Azure).
|
||||
|
@ -243,6 +259,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
|||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_az",
|
||||
Help: `Use Azure CLI tool az for authentication
|
||||
|
||||
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
|
||||
Setting this can be useful if you wish to use the az CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
|
||||
Don't set env_auth at the same time.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
|
@ -438,10 +468,12 @@ type Options struct {
|
|||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
ServicePrincipalFile string `config:"service_principal_file"`
|
||||
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||
UseMSI bool `config:"use_msi"`
|
||||
MSIObjectID string `config:"msi_object_id"`
|
||||
MSIClientID string `config:"msi_client_id"`
|
||||
MSIResourceID string `config:"msi_mi_res_id"`
|
||||
UseAZ bool `config:"use_az"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
|
@ -725,7 +757,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
}
|
||||
// Read credentials from the environment
|
||||
options := azidentity.DefaultAzureCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
ClientOptions: policyClientOptions,
|
||||
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
|
@ -875,6 +908,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
case opt.UseAZ:
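// Note: AzureCLICredential shells out to the locally installed az tool,
// so this branch assumes `az login` has already been run on the host.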
|
||||
var options = azidentity.AzureCLICredentialOptions{}
|
||||
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||
}
|
||||
case opt.Account != "":
|
||||
// Anonymous access
|
||||
anonymous = true
|
||||
|
|
|
@ -43,6 +43,7 @@ import (
|
|||
"github.com/rclone/rclone/lib/jwtutil"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
|
@ -256,7 +257,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
|||
}
|
||||
|
||||
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
||||
|
||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||
if len(rest) > 0 {
|
||||
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
||||
|
@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
// fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
// fmt.Printf("...Id %q\n", *info.Id)
|
||||
|
@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// check if dest already exists
|
||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if item != nil { // dest already exists, need to copy to temp name and then move
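// The copy endpoint won't overwrite an existing item, so work around it:
// copy to a temporary name first, delete the existing item, then move the
// temporary copy to the real name.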
|
||||
tempSuffix := "-rclone-copy-" + random.String(8)
|
||||
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
|
||||
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
|
||||
err = f.deleteObject(ctx, item.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.Move(ctx, tempObj, remote)
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
|
|
@ -120,6 +120,7 @@ var (
|
|||
"text/html": ".html",
|
||||
"text/plain": ".txt",
|
||||
"text/tab-separated-values": ".tsv",
|
||||
"text/markdown": ".md",
|
||||
}
|
||||
_mimeTypeToExtensionLinks = map[string]string{
|
||||
"application/x-link-desktop": ".desktop",
|
||||
|
@ -3558,7 +3559,8 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
|
||||
// Run the drive query calling fn on each entry found
|
||||
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
|
||||
list := f.svc.Files.List()
|
||||
if query != "" {
|
||||
list.Q(query)
|
||||
|
@ -3577,10 +3579,7 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
|
|||
if f.rootFolderID == "appDataFolder" {
|
||||
list.Spaces("appDataFolder")
|
||||
}
|
||||
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
|
||||
|
||||
var results []*drive.File
|
||||
for {
|
||||
var files *drive.FileList
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
|
@ -3588,20 +3587,66 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
|
|||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %w", err)
|
||||
return fmt.Errorf("failed to execute query: %w", err)
|
||||
}
|
||||
if files.IncompleteSearch {
|
||||
fs.Errorf(f, "search result INCOMPLETE")
|
||||
}
|
||||
results = append(results, files.Files...)
|
||||
for _, item := range files.Files {
|
||||
fn(item)
|
||||
}
|
||||
if files.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
list.PageToken(files.NextPageToken)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run the drive query returning the entries found
|
||||
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
|
||||
var results []*drive.File
|
||||
err = f.queryFn(ctx, query, func(item *drive.File) {
|
||||
results = append(results, item)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// Rescue, list or delete orphaned files
|
||||
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
|
||||
return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
|
||||
if len(item.Parents) != 0 {
|
||||
return
|
||||
}
|
||||
// Have found an orphaned entry
|
||||
if delete {
|
||||
fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
|
||||
err = f.delete(ctx, item.Id, true)
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
|
||||
}
|
||||
} else if dirID == "" {
|
||||
operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
|
||||
} else {
|
||||
fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Files.Update(item.Id, nil).
|
||||
AddParents(dirID).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "get",
|
||||
Short: "Get command for fetching the drive config parameters",
|
||||
|
@ -3793,6 +3838,37 @@ The result is a JSON array of matches, for example:
|
|||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
}, {
|
||||
Name: "rescue",
|
||||
Short: "Rescue or delete any orphaned files",
|
||||
Long: `This command rescues or deletes any orphaned files or directories.
|
||||
|
||||
Sometimes files can get orphaned in Google Drive. This means that they
|
||||
are no longer in any folder in Google Drive.
|
||||
|
||||
This command finds those files and either rescues them to a directory
|
||||
you specify or deletes them.
|
||||
|
||||
Usage:
|
||||
|
||||
This can be used in 3 ways.
|
||||
|
||||
First, list all orphaned files
|
||||
|
||||
rclone backend rescue drive:
|
||||
|
||||
Second, rescue all orphaned files to the directory indicated
|
||||
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
|
||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
||||
|
||||
rclone backend rescue drive: Orphans
|
||||
|
||||
Third, delete all orphaned files to the trash
|
||||
|
||||
rclone backend rescue drive: -o delete
|
||||
`,
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
|
@ -3921,6 +3997,22 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
|||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
if len(arg) == 0 {
|
||||
// no arguments - list only
|
||||
} else if !delete && len(arg) == 1 {
|
||||
dir := arg[0]
|
||||
dirID, err = f.dirCache.FindDir(ctx, dir, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
|
||||
}
|
||||
fs.Infof(f, "Rescuing orphans into %q", dir)
|
||||
} else {
|
||||
return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
|
||||
}
|
||||
return nil, f.rescue(ctx, dirID, delete)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
|
|
|
@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
|
|||
wantErr error
|
||||
}{
|
||||
{"doc", []string{".doc"}, nil},
|
||||
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
|
||||
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
|
||||
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
|
||||
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
|
||||
} {
|
||||
|
|
|
@ -47,6 +47,7 @@ import (
|
|||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
|
@ -1020,13 +1021,20 @@ func (f *Fs) Precision() time.Duration {
|
|||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Find and remove existing object
|
||||
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanup(&err)
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
|
@ -1040,7 +1048,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
|
||||
},
|
||||
}
|
||||
var err error
|
||||
var result *files.RelocationResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
result, err = f.srv.CopyV2(&arg)
|
||||
|
|
|
@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
|
|||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_upload",
|
||||
Default: false,
|
||||
Help: `Don't check the upload is OK
|
||||
|
||||
Normally rclone will try to check the upload exists after it has
|
||||
uploaded a file to make sure the size and modification time are as
|
||||
expected.
|
||||
|
||||
This flag stops rclone doing these checks. This enables uploading to
|
||||
folders which are write only.
|
||||
|
||||
You will likely need to use the --inplace flag also if uploading to
|
||||
a write only folder.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
|
@ -232,6 +248,7 @@ type Options struct {
|
|||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
NoCheckUpload bool `config:"no_check_upload"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
|
@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||
return fmt.Errorf("update stor: %w", err)
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if o.fs.opt.NoCheckUpload {
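// Checks are disabled (e.g. a write-only folder we cannot stat), so
// synthesise the FileInfo from the source object instead of reading it back.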
|
||||
o.info = &FileInfo{
|
||||
Name: o.remote,
|
||||
Size: uint64(src.Size()),
|
||||
ModTime: src.ModTime(ctx),
|
||||
precise: true,
|
||||
IsDir: false,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
|
|
|
@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")

@@ -1228,6 +1228,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
	}

	// Find existing object
	existingObj, err := f.NewObject(ctx, remote)
	if err == nil {
		defer func() {
			// Don't remove existing object if returning an error
			if err != nil {
				return
			}
			fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
			err = existingObj.Remove(ctx)
		}()
	}

	// Create temporary object
	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
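The signature change to named returns (dst fs.Object, err error) is what lets the deferred cleanup above see whether Copy ultimately succeeded; a minimal sketch of the pattern (names are illustrative):

    func copyReplacing() (err error) {
        defer func() {
            if err != nil {
                return // the copy failed: keep the existing destination object
            }
            // the copy succeeded: remove the object that was just superseded
        }()
        // ... perform the server-side copy, assigning its result to err ...
        return err
    }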
@@ -60,16 +60,14 @@ const (
	minSleep = 10 * time.Millisecond
)

var (
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)
// Description of how to auth for this app
var storageConfig = &oauth2.Config{
	Scopes:       []string{storage.DevstorageReadWriteScope},
	Endpoint:     google.Endpoint,
	ClientID:     rcloneClientID,
	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
	RedirectURL:  oauthutil.RedirectURL,
}

// Register with Fs
func init() {

@@ -106,6 +104,12 @@ func init() {
			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Hide:      fs.OptionHideBoth,
			Sensitive: true,
		}, {
			Name:      "access_token",
			Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
			Hide:      fs.OptionHideConfigurator,
			Sensitive: true,
			Advanced:  true,
		}, {
			Name: "anonymous",
			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",

@@ -379,6 +383,7 @@ type Options struct {
	Enc              encoder.MultiEncoder `config:"encoding"`
	EnvAuth          bool                 `config:"env_auth"`
	DirectoryMarkers bool                 `config:"directory_markers"`
	AccessToken      string               `config:"access_token"`
}

// Fs represents a remote storage server

@@ -535,6 +540,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		if err != nil {
			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
		}
	} else if opt.AccessToken != "" {
		ts := oauth2.Token{AccessToken: opt.AccessToken}
		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
	} else {
		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
		if err != nil {

@@ -944,7 +952,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
		return e
	}
	return f.createDirectoryMarker(ctx, bucket, dir)

}

// mkdirParent creates the parent bucket/directory if it doesn't exist
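The new access_token branch in NewFs uses the stock golang.org/x/oauth2 static-token pattern. A self-contained sketch of that mechanism (the token value is a placeholder):

    package main

    import (
        "context"
        "fmt"
        "net/http"

        "golang.org/x/oauth2"
    )

    // newStaticTokenClient returns an *http.Client that attaches a fixed
    // bearer token to every request, which is what the AccessToken branch does.
    func newStaticTokenClient(ctx context.Context, token string) *http.Client {
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
        return oauth2.NewClient(ctx, ts)
    }

    func main() {
        client := newStaticTokenClient(context.Background(), "ya29.PLACEHOLDER")
        fmt.Printf("%T\n", client.Transport) // *oauth2.Transport
    }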
@@ -28,7 +28,6 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/batcher"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"

@@ -160,6 +159,34 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
			Advanced: true,
		}, {
			Name:    "proxy",
			Default: "",
			Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images

The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.

However if you use the gphotosdl proxy then you can download original,
unchanged images.

This runs a headless browser in the background.

Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)

First run with

    gphotosdl -login

Then once you have logged into google photos close the browser window
and run

    gphotosdl

Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
rclone use the proxy.
`, "|", "`"),
			Advanced: true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
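Putting the help text above together, a typical session looks like this (the remote name gphotos is illustrative):

    gphotosdl -login    # first run: log in to Google Photos, then close the browser
    gphotosdl           # leave the proxy running in the background
    rclone copy gphotos:media/all /backup/photos --gphotos-proxy "http://localhost:8282"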
@@ -181,6 +208,7 @@ type Options struct {
	BatchMode    string      `config:"batch_mode"`
	BatchSize    int         `config:"batch_size"`
	BatchTimeout fs.Duration `config:"batch_timeout"`
	Proxy        string      `config:"proxy"`
}

// Fs represents a remote storage server

@@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	defer log.Trace(f, "remote=%q", remote)("")
	// defer log.Trace(f, "remote=%q", remote)("")
	return f.newObjectWithInfo(ctx, remote, nil)
}

@@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil || pattern.isFile {
		return nil, fs.ErrorDirNotFound

@@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	defer log.Trace(f, "src=%+v", src)("")
	// defer log.Trace(f, "src=%+v", src)("")
	// Temporary Object under construction
	o := &Object{
		fs: f,

@@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap

// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
	match, prefix, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound

@@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	defer log.Trace(f, "dir=%q")("err=%v", &err)
	// defer log.Trace(f, "dir=%q")("err=%v", &err)
	match, _, pattern := patterns.match(f.root, dir, false)
	if pattern == nil {
		return fs.ErrorDirNotFound

@@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	defer log.Trace(o, "")("")
	// defer log.Trace(o, "")("")
	if !o.fs.opt.ReadSize || o.bytes >= 0 {
		return o.bytes
	}

@@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	defer log.Trace(o, "")("")
	// defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)

@@ -965,16 +993,20 @@ func (o *Object) downloadURL() string {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	defer log.Trace(o, "")("")
	// defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
	url := o.downloadURL()
	if o.fs.opt.Proxy != "" {
		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
	}
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.downloadURL(),
		RootURL: url,
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {

@@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload
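One consequence of the Open change above worth spelling out: with --gphotos-proxy http://localhost:8282 set, an object whose id is AF1Qip... (a placeholder) is fetched from http://localhost:8282/id/AF1Qip... rather than from the Google-served download URL, which is how the full-resolution originals come back.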
backend/iclouddrive/api/client.go (new file, 166 lines)
@@ -0,0 +1,166 @@
// Package api provides functionality for interacting with the iCloud API.
package api

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

const (
	baseEndpoint  = "https://www.icloud.com"
	homeEndpoint  = "https://www.icloud.com"
	setupEndpoint = "https://setup.icloud.com/setup/ws/1"
	authEndpoint  = "https://idmsa.apple.com/appleauth/auth"
)

type sessionSave func(*Session)

// Client defines the client configuration
type Client struct {
	appleID             string
	password            string
	srv                 *rest.Client
	Session             *Session
	sessionSaveCallback sessionSave

	drive *DriveService
}

// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
// - appleID: the Apple ID of the user.
// - password: the password of the user.
// - trustToken: the trust token for the session.
// - clientID: the client id for the session.
// - cookies: the cookies for the session.
// - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
	icloud := &Client{
		appleID:             appleID,
		password:            password,
		srv:                 rest.NewClient(fshttp.NewClient(context.Background())),
		Session:             NewSession(),
		sessionSaveCallback: sessionSaveCallback,
	}

	icloud.Session.TrustToken = trustToken
	icloud.Session.Cookies = cookies
	icloud.Session.ClientID = clientID
	return icloud, nil
}

// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
	var err error
	if c.drive == nil {
		c.drive, err = NewDriveService(c)
		if err != nil {
			return nil, err
		}
	}
	return c.drive, nil
}

// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	resp, err = c.Session.Request(ctx, opts, request, response)
	if err != nil && resp != nil {
		// try to reauth
		if resp.StatusCode == 401 || resp.StatusCode == 421 {
			err = c.Authenticate(ctx)
			if err != nil {
				return nil, err
			}

			if c.Session.Requires2FA() {
				return nil, errors.New("trust token expired, please reauth")
			}
			return c.RequestNoReAuth(ctx, opts, request, response)
		}
	}
	return resp, err
}

// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	// Make the request without re-authenticating
	resp, err = c.Session.Request(ctx, opts, request, response)
	return resp, err
}

// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
	if c.Session.Cookies != nil {
		if err := c.Session.ValidateSession(ctx); err == nil {
			fs.Debugf("icloud", "Valid session, no need to reauth")
			return nil
		}
		c.Session.Cookies = nil
	}

	fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
	err := c.Session.SignIn(ctx, c.appleID, c.password)

	if err == nil {
		err = c.Session.AuthWithToken(ctx)
		if err == nil && c.sessionSaveCallback != nil {
			c.sessionSaveCallback(c.Session)
		}
	}
	return err
}

// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
	return c.Session.SignIn(ctx, c.appleID, c.password)
}

// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
	m, err := json.Marshal(values)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(m), nil
}

// RequestError holds info on a result state, icloud can return a 200 but the result is unknown
type RequestError struct {
	Status string
	Text   string
}

// Error satisfies the error interface.
func (e *RequestError) Error() string {
	return fmt.Sprintf("%s: %s", e.Text, e.Status)
}

func newRequestError(Status string, Text string) *RequestError {
	return &RequestError{
		Status: strings.ToLower(Status),
		Text:   Text,
	}
}

// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
	return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}
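A minimal wiring sketch for the client above (the credentials, trust token, and persistence callback are placeholders; the real caller is the backend that wraps this package):

    import (
        "context"
        "net/http"

        "github.com/google/uuid"
        "github.com/rclone/rclone/backend/iclouddrive/api"
    )

    func openDrive(ctx context.Context) (*api.DriveService, error) {
        var cookies []*http.Cookie // restored from a previously saved session, if any
        client, err := api.New("user@example.com", "app-password", "TRUST-TOKEN",
            "auth-"+uuid.New().String(), cookies,
            func(s *api.Session) { /* persist s for the next run */ })
        if err != nil {
            return nil, err
        }
        if err := client.Authenticate(ctx); err != nil {
            return nil, err
        }
        return client.DriveService() // ready for GetItemByPath, CreateUpload, ...
    }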
backend/iclouddrive/api/drive.go (new file, 913 lines)
@@ -0,0 +1,913 @@
package api

import (
	"bytes"
	"context"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

const (
	defaultZone        = "com.apple.CloudDocs"
	statusOk           = "OK"
	statusEtagConflict = "ETAG_CONFLICT"
)

// DriveService represents an iCloud Drive service.
type DriveService struct {
	icloud       *Client
	RootID       string
	endpoint     string
	docsEndpoint string
}

// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
	return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}

// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
	items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
	if err != nil {
		return nil, resp, err
	}
	return items[0], resp, err
}

// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
	var err error
	_items := []map[string]any{}
	for _, id := range ids {
		_items = append(_items, map[string]any{
			"drivewsid":        id,
			"partialData":      false,
			"includeHierarchy": false,
		})
	}

	var body *bytes.Reader
	var path string
	if !includeChildren {
		values := []map[string]any{{
			"items": _items,
		}}
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetails"
	} else {
		values := _items
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetailsInFolders"
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         path,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items, resp, err
}

// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "false")
	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "true")

	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("document_id", id)
	values.Set("unified_format", "false") // important
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_id",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}
	var item *Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/item/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}
	var item *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
	values := url.Values{}
	values.Set("limit", strconv.FormatInt(limit, 10))

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/enumerate/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}

	items := struct {
		Items []*DriveItemRaw `json:"drive_item"`
	}{}

	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items.Items, resp, err
}

// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
	_, zone, docid := DeconstructDriveID(id)
	values := url.Values{}
	values.Set("document_id", docid)

	if zone == "" {
		zone = defaultZone
	}

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + zone + "/download/by_id",
		Parameters:   values,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}

	var filer *FileRequest
	resp, err := d.icloud.Request(ctx, opts, nil, &filer)

	if err != nil {
		return "", resp, err
	}

	var url string
	if filer.DataToken != nil {
		url = filer.DataToken.URL
	} else {
		url = filer.PackageToken.URL
	}

	return url, resp, err
}

// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
	opts := &rest.Opts{
		Method:       "GET",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      url,
		Options:      opt,
	}

	resp, err := d.icloud.srv.Call(ctx, opts)
	if err != nil {
		// icloud has some weird http codes
		if resp.StatusCode == 330 {
			loc, err := resp.Location()
			if err == nil {
				return d.DownloadFile(ctx, loc.String(), opt)
			}
		}

		return resp, err
	}
	return d.icloud.srv.Call(ctx, opts)
}

// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}

// MoveItemToTrashByID moves an item to the trash based on the drive ID.
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": drivewsid,
			"etag":      etag,
			"clientId":  drivewsid,
		}}}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItemsToTrash",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	item := struct {
		Items []*DriveItem `json:"items"`
	}{}
	resp, err := d.icloud.Request(ctx, opts, nil, &item)

	if err != nil {
		return nil, resp, err
	}

	if item.Items[0].Status != statusOk {
		// rerun with latest etag
		if force && item.Items[0].Status == "ETAG_CONFLICT" {
			return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
		}

		err = newRequestError(item.Items[0].Status, "unknown request status")
	}

	return item.Items[0], resp, err
}
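// A note on the force flag above (the rename and move calls below use the same
// contract): when force is true and the server reports ETAG_CONFLICT, the call
// re-runs itself once with the etag the server just returned, so an invocation
// with a stale etag, e.g.
//
//	item, _, err := drive.MoveItemToTrashByID(ctx, drivewsid, staleEtag, true)
//
// still succeeds, while force == false surfaces the conflict as a RequestError.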

// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}

// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": drivewsid,
		"folders": []map[string]any{{
			"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
			"name":     name,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/createFolders",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var fResp *CreateFoldersResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
	if err != nil {
		return nil, resp, err
	}
	status := fResp.Folders[0].Status
	if status != statusOk {
		err = newRequestError(status, "unknown request status")
	}

	return fResp.Folders[0], resp, err
}

// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}

// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": id,
			"name":      name,
			"etag":      etag,
			// "extension": split[1],
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/renameItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == "ETAG_CONFLICT" {
			return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	docSrc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	docDst, resp, err := d.GetDocByItemID(ctx, dstID)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}

// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// 	return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }

// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": dstID,
		"items": []map[string]any{{
			"drivewsid": id,
			"etag":      etag,
			"clientId":  id,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == "ETAG_CONFLICT" {
			return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
	// putting name in info doesn't work; extension does work, so assume this is a bug in the endpoint
	values := map[string]any{
		"info_to_update": map[string]any{},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/v1/item/copy/" + itemID,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}

	var info *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &info)
	if err != nil {
		return nil, resp, err
	}
	return info, resp, err
}

// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
	// first we need to request an upload url
	values := map[string]any{
		"filename":     name,
		"type":         "FILE",
		"size":         strconv.FormatInt(size, 10),
		"content_type": GetContentTypeForFile(name),
	}
	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/upload/web",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo []*UploadResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}
	return responseInfo[0], resp, err
}

// Upload uploads a file to the given url
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
	// TODO: implement multipart upload
	opts := rest.Opts{
		Method:        "POST",
		ExtraHeaders:  d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:       uploadURL,
		Body:          in,
		ContentLength: &size,
		ContentType:   GetContentTypeForFile(name),
		// MultipartContentName: "files",
		MultipartFileName: name,
	}
	var singleFileResponse *SingleFileResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
	if err != nil {
		return nil, resp, err
	}
	return singleFileResponse, resp, err
}

// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
	body, err := IntoReader(r)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/update/documents",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo *DocumentUpdateResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}

	doc := responseInfo.Results[0].Document
	item := DriveItem{
		Drivewsid:    "FILE::com.apple.CloudDocs::" + doc.DocumentID,
		Docwsid:      doc.DocumentID,
		Itemid:       doc.ItemID,
		Etag:         doc.Etag,
		ParentID:     doc.ParentID,
		DateModified: time.Unix(r.Mtime, 0),
		DateCreated:  time.Unix(r.Mtime, 0),
		Type:         doc.Type,
		Name:         doc.Name,
		Size:         doc.Size,
	}

	return &item, resp, err
}

// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
	AllowConflict   bool   `json:"allow_conflict"`
	Btime           int64  `json:"btime"`
	Command         string `json:"command"`
	CreateShortGUID bool   `json:"create_short_guid"`
	Data            struct {
		Receipt            string `json:"receipt,omitempty"`
		ReferenceSignature string `json:"reference_signature,omitempty"`
		Signature          string `json:"signature,omitempty"`
		Size               int64  `json:"size,omitempty"`
		WrappingKey        string `json:"wrapping_key,omitempty"`
	} `json:"data,omitempty"`
	DocumentID string    `json:"document_id"`
	FileFlags  FileFlags `json:"file_flags"`
	Mtime      int64     `json:"mtime"`
	Path       struct {
		Path               string `json:"path"`
		StartingDocumentID string `json:"starting_document_id"`
	} `json:"path"`
}

// FileFlags defines the file flags for a document.
type FileFlags struct {
	IsExecutable bool `json:"is_executable"`
	IsHidden     bool `json:"is_hidden"`
	IsWritable   bool `json:"is_writable"`
}

// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
	return UpdateFileInfo{
		Command:         "add_file",
		CreateShortGUID: true,
		AllowConflict:   true,
		FileFlags: FileFlags{
			IsExecutable: true,
			IsHidden:     false,
			IsWritable:   false,
		},
	}
}

// DriveItemRaw is a raw drive item.
// not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
type DriveItemRaw struct {
	ItemID   string            `json:"item_id"`
	ItemInfo *DriveItemRawInfo `json:"item_info"`
}

// SplitName splits the name of a DriveItemRaw into its name and extension.
//
// It returns the name and extension as separate strings. If the name ends with a dot,
// it means there is no extension, so an empty string is returned for the extension.
// If the name does not contain a dot, there is likewise no extension.
func (d *DriveItemRaw) SplitName() (string, string) {
	name := d.ItemInfo.Name
	// ends with a dot, no extension
	if strings.HasSuffix(name, ".") {
		return name, ""
	}
	lastInd := strings.LastIndex(name, ".")

	if lastInd == -1 {
		return name, ""
	}
	return name[:lastInd], name[lastInd+1:]
}
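// Worked examples of SplitName, derived from the logic above (illustrative):
//
//	"report.txt" -> ("report", "txt")
//	"archive."   -> ("archive.", "") // trailing dot counts as no extension
//	"README"     -> ("README", "")   // no dot at all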

// ModTime returns the modification time of the DriveItemRaw.
//
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the modification time of the DriveItemRaw.
func (d *DriveItemRaw) ModTime() time.Time {
	i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
	if err != nil {
		return time.Time{}
	}
	return time.UnixMilli(i)
}

// CreatedTime returns the creation time of the DriveItemRaw.
//
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the creation time of the DriveItemRaw.
func (d *DriveItemRaw) CreatedTime() time.Time {
	i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
	if err != nil {
		return time.Time{}
	}
	return time.UnixMilli(i)
}

// DriveItemRawInfo is the raw information about a drive item.
type DriveItemRawInfo struct {
	Name string `json:"name"`
	// Extension is absolutely borked on endpoints so don't use it.
	Extension  string `json:"extension"`
	Size       int64  `json:"size,string"`
	Type       string `json:"type"`
	Version    string `json:"version"`
	ModifiedAt string `json:"modified_at"`
	CreatedAt  string `json:"created_at"`
	Urls       struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
}

// IntoDriveItem converts a DriveItemRaw into a DriveItem.
//
// It takes no parameters.
// It returns a pointer to a DriveItem.
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
	name, extension := d.SplitName()
	return &DriveItem{
		Itemid:       d.ItemID,
		Name:         name,
		Extension:    extension,
		Type:         d.ItemInfo.Type,
		Etag:         d.ItemInfo.Version,
		DateModified: d.ModTime(),
		DateCreated:  d.CreatedTime(),
		Size:         d.ItemInfo.Size,
		Urls:         d.ItemInfo.Urls,
	}
}

// DocumentUpdateResponse is the response of a document update request.
type DocumentUpdateResponse struct {
	Status struct {
		StatusCode   int    `json:"status_code"`
		ErrorMessage string `json:"error_message"`
	} `json:"status"`
	Results []struct {
		Status struct {
			StatusCode   int    `json:"status_code"`
			ErrorMessage string `json:"error_message"`
		} `json:"status"`
		OperationID interface{} `json:"operation_id"`
		Document    *Document   `json:"document"`
	} `json:"results"`
}

// Document represents a document on iCloud.
type Document struct {
	Status struct {
		StatusCode   int    `json:"status_code"`
		ErrorMessage string `json:"error_message"`
	} `json:"status"`
	DocumentID string `json:"document_id"`
	ItemID     string `json:"item_id"`
	Urls       struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
	Etag           string       `json:"etag"`
	ParentID       string       `json:"parent_id"`
	Name           string       `json:"name"`
	Type           string       `json:"type"`
	Deleted        bool         `json:"deleted"`
	Mtime          int64        `json:"mtime"`
	LastEditorName string       `json:"last_editor_name"`
	Data           DocumentData `json:"data"`
	Size           int64        `json:"size"`
	Btime          int64        `json:"btime"`
	Zone           string       `json:"zone"`
	FileFlags      struct {
		IsExecutable bool `json:"is_executable"`
		IsWritable   bool `json:"is_writable"`
		IsHidden     bool `json:"is_hidden"`
	} `json:"file_flags"`
	LastOpenedTime   int64       `json:"lastOpenedTime"`
	RestorePath      interface{} `json:"restorePath"`
	HasChainedParent bool        `json:"hasChainedParent"`
}

// DriveID returns the drive ID of the Document.
func (d *Document) DriveID() string {
	if d.Zone == "" {
		d.Zone = defaultZone
	}
	return d.Type + "::" + d.Zone + "::" + d.DocumentID
}

// DocumentData represents the data of a document.
type DocumentData struct {
	Signature          string `json:"signature"`
	Owner              string `json:"owner"`
	Size               int64  `json:"size"`
	ReferenceSignature string `json:"reference_signature"`
	WrappingKey        string `json:"wrapping_key"`
	PcsInfo            string `json:"pcsInfo"`
}

// SingleFileResponse is the response of a single file request.
type SingleFileResponse struct {
	SingleFile *SingleFileInfo `json:"singleFile"`
}

// SingleFileInfo represents the information of a single file.
type SingleFileInfo struct {
	ReferenceSignature string `json:"referenceChecksum"`
	Size               int64  `json:"size"`
	Signature          string `json:"fileChecksum"`
	WrappingKey        string `json:"wrappingKey"`
	Receipt            string `json:"receipt"`
}

// UploadResponse is the response of an upload request.
type UploadResponse struct {
	URL        string `json:"url"`
	DocumentID string `json:"document_id"`
}

// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
	URL                string `json:"url"`
	Token              string `json:"token"`
	Signature          string `json:"signature"`
	WrappingKey        string `json:"wrapping_key"`
	ReferenceSignature string `json:"reference_signature"`
}

// FileRequest represents the request of a file.
type FileRequest struct {
	DocumentID   string            `json:"document_id"`
	ItemID       string            `json:"item_id"`
	OwnerDsid    int64             `json:"owner_dsid"`
	DataToken    *FileRequestToken `json:"data_token,omitempty"`
	PackageToken *FileRequestToken `json:"package_token,omitempty"`
	DoubleEtag   string            `json:"double_etag"`
}

// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
	Folders []*DriveItem `json:"folders"`
}

// DriveItem represents an item on iCloud.
type DriveItem struct {
	DateCreated         time.Time    `json:"dateCreated"`
	Drivewsid           string       `json:"drivewsid"`
	Docwsid             string       `json:"docwsid"`
	Itemid              string       `json:"item_id"`
	Zone                string       `json:"zone"`
	Name                string       `json:"name"`
	ParentID            string       `json:"parentId"`
	Hierarchy           []DriveItem  `json:"hierarchy"`
	Etag                string       `json:"etag"`
	Type                string       `json:"type"`
	AssetQuota          int64        `json:"assetQuota"`
	FileCount           int64        `json:"fileCount"`
	ShareCount          int64        `json:"shareCount"`
	ShareAliasCount     int64        `json:"shareAliasCount"`
	DirectChildrenCount int64        `json:"directChildrenCount"`
	Items               []*DriveItem `json:"items"`
	NumberOfItems       int64        `json:"numberOfItems"`
	Status              string       `json:"status"`
	Extension           string       `json:"extension,omitempty"`
	DateModified        time.Time    `json:"dateModified,omitempty"`
	DateChanged         time.Time    `json:"dateChanged,omitempty"`
	Size                int64        `json:"size,omitempty"`
	LastOpenTime        time.Time    `json:"lastOpenTime,omitempty"`
	Urls                struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
}

// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
	return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}

// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
	return d.Urls.URLDownload
}

// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
	if d.Extension != "" {
		return d.Name + "." + d.Extension
	}
	return d.Name
}

// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
	split := strings.Split(id, "::")
	return split[len(split)-1]
}

// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
	split := strings.Split(id, "::")
	if len(split) < 3 {
		return "", "", id
	}
	return split[0], split[1], split[2]
}

// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
	return strings.Join([]string{t, zone, id}, "::")
}

// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
	// detect MIME type by looking at the filename only
	mimeType := mime.TypeByExtension(filepath.Ext(name))
	if mimeType == "" {
		// api requires a mime type passed in
		mimeType = "text/plain"
	}
	return strings.Split(mimeType, ";")[0]
}
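For orientation, the ID helpers at the end of drive.go round-trip like this (the document ID is a placeholder):

    id := api.ConstructDriveID("docid123", "com.apple.CloudDocs", "FILE")
    // id == "FILE::com.apple.CloudDocs::docid123"
    docType, zone, docid := api.DeconstructDriveID(id)
    // docType == "FILE", zone == "com.apple.CloudDocs", docid == "docid123"

    // GetContentTypeForFile strips any parameters from the detected MIME type:
    _ = api.GetContentTypeForFile("photo.jpg") // "image/jpeg"
    _ = api.GetContentTypeForFile("no-ext")    // "text/plain" (the fallback the API requires)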
backend/iclouddrive/api/session.go (new file, 412 lines)
@@ -0,0 +1,412 @@
package api

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"slices"
	"strings"

	"github.com/oracle/oci-go-sdk/v65/common"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// Session represents an iCloud session
type Session struct {
	SessionToken   string         `json:"session_token"`
	Scnt           string         `json:"scnt"`
	SessionID      string         `json:"session_id"`
	AccountCountry string         `json:"account_country"`
	TrustToken     string         `json:"trust_token"`
	ClientID       string         `json:"client_id"`
	Cookies        []*http.Cookie `json:"cookies"`
	AccountInfo    AccountInfo    `json:"account_info"`

	srv *rest.Client `json:"-"`
}

// String returns the session as a string
// func (s *Session) String() string {
// 	jsession, _ := json.Marshal(s)
// 	return string(jsession)
// }

// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
	resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)

	if err != nil {
		return resp, err
	}

	if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
		s.AccountCountry = val
	}
	if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
		s.SessionID = val
	}
	if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
		s.SessionToken = val
	}
	if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
		s.TrustToken = val
	}
	if val := resp.Header.Get("scnt"); val != "" {
		s.Scnt = val
	}

	return resp, nil
}

// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
	return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}

// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
	trustTokens := []string{}
	if s.TrustToken != "" {
		trustTokens = []string{s.TrustToken}
	}
	values := map[string]any{
		"accountName": appleID,
		"password":    password,
		"rememberMe":  true,
		"trustTokens": trustTokens,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/signin",
		Parameters:   url.Values{},
		ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
		RootURL:      authEndpoint,
		IgnoreStatus: true, // need to handle 409 for hsa2
		NoResponse:   true,
		Body:         body,
	}
	opts.Parameters.Set("isRememberMeEnabled", "true")
	_, err = s.Request(ctx, opts, nil, nil)

	return err

}

// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
	values := map[string]any{
		"accountCountryCode": s.AccountCountry,
		"dsWebAuthToken":     s.SessionToken,
		"extended_login":     true,
		"trustToken":         s.TrustToken,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/accountLogin",
		ExtraHeaders: GetCommonHeaders(map[string]string{}),
		RootURL:      setupEndpoint,
		Body:         body,
	}

	resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err == nil {
		s.Cookies = resp.Cookies()
	}

	return err
}

// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
	values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}

	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/verify/trusteddevice/securitycode",
		ExtraHeaders: headers,
		RootURL:      authEndpoint,
		Body:         body,
		NoResponse:   true,
	}

	_, err = s.Request(ctx, opts, nil, nil)
	if err == nil {
		if err := s.TrustSession(ctx); err != nil {
			return err
		}

		return nil
	}

	return fmt.Errorf("validate2FACode failed: %w", err)
}

// TrustSession trusts the session
func (s *Session) TrustSession(ctx context.Context) error {
	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID

	opts := rest.Opts{
		Method:        "GET",
		Path:          "/2sv/trust",
		ExtraHeaders:  headers,
		RootURL:       authEndpoint,
		NoResponse:    true,
		ContentLength: common.Int64(0),
	}

	_, err := s.Request(ctx, opts, nil, nil)
	if err != nil {
		return fmt.Errorf("trustSession failed: %w", err)
	}

	return s.AuthWithToken(ctx)
}
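// End to end, the 2FA flow implemented above runs like this (illustrative):
//
//	err := session.SignIn(ctx, appleID, password) // may come back requiring 2FA
//	err = session.Validate2FACode(ctx, "123456")  // code shown on a trusted device
//
// Validate2FACode calls TrustSession, which obtains a trust token and then
// re-authenticates via AuthWithToken, so the session is usable afterwards.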

// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
	opts := rest.Opts{
		Method:        "POST",
		Path:          "/validate",
		ExtraHeaders:  s.GetHeaders(map[string]string{}),
		RootURL:       setupEndpoint,
		ContentLength: common.Int64(0),
	}
	_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err != nil {
		return fmt.Errorf("validateSession failed: %w", err)
	}

	return nil
}

// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Accept":                           "application/json",
		"Content-Type":                     "application/json",
		"X-Apple-OAuth-Client-Id":          s.ClientID,
		"X-Apple-OAuth-Client-Type":        "firstPartyAuth",
		"X-Apple-OAuth-Redirect-URI":       "https://www.icloud.com",
		"X-Apple-OAuth-Require-Grant-Code": "true",
		"X-Apple-OAuth-Response-Mode":      "web_message",
		"X-Apple-OAuth-Response-Type":      "code",
		"X-Apple-OAuth-State":              s.ClientID,
		"X-Apple-Widget-Key":               s.ClientID,
		"Origin":                           homeEndpoint,
		"Referer":                          fmt.Sprintf("%s/", homeEndpoint),
		"User-Agent":                       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// GetHeaders gets the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
	headers := GetCommonHeaders(map[string]string{})
	headers["Cookie"] = s.GetCookieString()
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
	cookieHeader := ""
	// we only care about name and value.
	for _, cookie := range s.Cookies {
		cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
	}
	return cookieHeader
}

// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Content-Type": "application/json",
		"Origin":       baseEndpoint,
		"Referer":      fmt.Sprintf("%s/", baseEndpoint),
		"User-Agent":   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
	var hashes []string
	for _, cookie := range right {
		hashes = append(hashes, cookie.Raw)
	}
	for _, cookie := range left {
		if !slices.Contains(hashes, cookie.Raw) {
			right = append(right, cookie)
		}
	}
	return right, nil
}

// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
	var domainCookies []*http.Cookie
	for _, cookie := range cookies {
		if strings.HasSuffix(url.Host, cookie.Domain) {
			domainCookies = append(domainCookies, cookie)
		}
	}
	return domainCookies, nil
}
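// Illustrative behaviour of GetCookiesForDomain: for a URL with host
// "www.icloud.com", a cookie with Domain "icloud.com" is kept (suffix match
// on the host) while one scoped to "apple.com" is dropped.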
||||
// NewSession creates a new Session instance with default values.
|
||||
func NewSession() *Session {
|
||||
session := &Session{}
|
||||
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
|
||||
//session.ClientID = "auth-" + uuid.New().String()
|
||||
return session
|
||||
}
|
||||
|
||||
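A minimal sketch of how these helpers compose in practice: filter a cookie jar down to the request's domain, merge in newly received cookies, then build the headers for the next call. (session and resp are illustrative placeholders, not names from this file.)

	u, err := url.Parse(homeEndpoint)
	if err != nil {
		return err
	}
	// Keep only cookies whose Domain matches the host we are calling.
	scoped, err := GetCookiesForDomain(u, session.Cookies)
	if err != nil {
		return err
	}
	// Fold in cookies from the last response; duplicates (by Raw form) are dropped.
	session.Cookies, err = MergeCookies(scoped, resp.Cookies())
	if err != nil {
		return err
	}
	// Build headers, overriding a default for this one request.
	headers := session.GetHeaders(map[string]string{"Accept": "text/html"})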
// AccountInfo represents account info
type AccountInfo struct {
	DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
	HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
	ICDPEnabled                  bool                   `json:"iCDPEnabled"`
	Webservices                  map[string]*webService `json:"webservices"`
	PcsEnabled                   bool                   `json:"pcsEnabled"`
	TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
	ConfigBag                    struct {
		Urls struct {
			AccountCreateUI     string `json:"accountCreateUI"`
			AccountLoginUI      string `json:"accountLoginUI"`
			AccountLogin        string `json:"accountLogin"`
			AccountRepairUI     string `json:"accountRepairUI"`
			DownloadICloudTerms string `json:"downloadICloudTerms"`
			RepairDone          string `json:"repairDone"`
			AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
			VettingURLForEmail  string `json:"vettingUrlForEmail"`
			AccountCreate       string `json:"accountCreate"`
			GetICloudTerms      string `json:"getICloudTerms"`
			VettingURLForPhone  string `json:"vettingUrlForPhone"`
		} `json:"urls"`
		AccountCreateEnabled bool `json:"accountCreateEnabled"`
	} `json:"configBag"`
	HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
	AppsOrder                    []string `json:"appsOrder"`
	Version                      int      `json:"version"`
	IsExtendedLogin              bool     `json:"isExtendedLogin"`
	PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
	IsRepairNeeded               bool     `json:"isRepairNeeded"`
	HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
	RequestInfo                  struct {
		Country  string `json:"country"`
		TimeZone string `json:"timeZone"`
		Region   string `json:"region"`
	} `json:"requestInfo"`
	PcsDeleted bool `json:"pcsDeleted"`
	ICloudInfo struct {
		SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
	} `json:"iCloudInfo"`
	Apps map[string]*ValidateDataApp `json:"apps"`
}

// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
	HsaVersion                         int           `json:"hsaVersion"`
	LastName                           string        `json:"lastName"`
	ICDPEnabled                        bool          `json:"iCDPEnabled"`
	TantorMigrated                     bool          `json:"tantorMigrated"`
	Dsid                               string        `json:"dsid"`
	HsaEnabled                         bool          `json:"hsaEnabled"`
	IsHideMyEmailSubscriptionActive    bool          `json:"isHideMyEmailSubscriptionActive"`
	IroncadeMigrated                   bool          `json:"ironcadeMigrated"`
	Locale                             string        `json:"locale"`
	BrZoneConsolidated                 bool          `json:"brZoneConsolidated"`
	ICDRSCapableDeviceList             string        `json:"ICDRSCapableDeviceList"`
	IsManagedAppleID                   bool          `json:"isManagedAppleID"`
	IsCustomDomainsFeatureAvailable    bool          `json:"isCustomDomainsFeatureAvailable"`
	IsHideMyEmailFeatureAvailable      bool          `json:"isHideMyEmailFeatureAvailable"`
	ContinueOnDeviceEligibleDeviceInfo []string      `json:"ContinueOnDeviceEligibleDeviceInfo"`
	Gilligvited                        bool          `json:"gilligvited"`
	AppleIDAliases                     []interface{} `json:"appleIdAliases"`
	UbiquityEOLEnabled                 bool          `json:"ubiquityEOLEnabled"`
	IsPaidDeveloper                    bool          `json:"isPaidDeveloper"`
	CountryCode                        string        `json:"countryCode"`
	NotificationID                     string        `json:"notificationId"`
	PrimaryEmailVerified               bool          `json:"primaryEmailVerified"`
	ADsID                              string        `json:"aDsID"`
	Locked                             bool          `json:"locked"`
	ICDRSCapableDeviceCount            int           `json:"ICDRSCapableDeviceCount"`
	HasICloudQualifyingDevice          bool          `json:"hasICloudQualifyingDevice"`
	PrimaryEmail                       string        `json:"primaryEmail"`
	AppleIDEntries                     []struct {
		IsPrimary bool   `json:"isPrimary"`
		Type      string `json:"type"`
		Value     string `json:"value"`
	} `json:"appleIdEntries"`
	GilliganEnabled    bool   `json:"gilligan-enabled"`
	IsWebAccessAllowed bool   `json:"isWebAccessAllowed"`
	FullName           string `json:"fullName"`
	MailFlags          struct {
		IsThreadingAvailable           bool `json:"isThreadingAvailable"`
		IsSearchV2Provisioned          bool `json:"isSearchV2Provisioned"`
		SCKMail                        bool `json:"sCKMail"`
		IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
	} `json:"mailFlags"`
	LanguageCode         string `json:"languageCode"`
	AppleID              string `json:"appleId"`
	HasUnreleasedOS      bool   `json:"hasUnreleasedOS"`
	AnalyticsOptInStatus bool   `json:"analyticsOptInStatus"`
	FirstName            string `json:"firstName"`
	ICloudAppleIDAlias   string `json:"iCloudAppleIdAlias"`
	NotesMigrated        bool   `json:"notesMigrated"`
	BeneficiaryInfo      struct {
		IsBeneficiary bool `json:"isBeneficiary"`
	} `json:"beneficiaryInfo"`
	HasPaymentInfo bool   `json:"hasPaymentInfo"`
	PcsDelet       bool   `json:"pcsDelet"`
	AppleIDAlias   string `json:"appleIdAlias"`
	BrMigrated     bool   `json:"brMigrated"`
	StatusCode     int    `json:"statusCode"`
	FamilyEligible bool   `json:"familyEligible"`
}

// ValidateDataApp represents an app
type ValidateDataApp struct {
	CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
	IsQualifiedForBeta     bool `json:"isQualifiedForBeta"`
}

// webService represents a web service
type webService struct {
	PcsRequired bool   `json:"pcsRequired"`
	URL         string `json:"url"`
	UploadURL   string `json:"uploadUrl"`
	Status      string `json:"status"`
}
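A minimal sketch of decoding this structure, assuming an illustrative JSON body from the validate endpoint (the "drivews" key name is an assumption, shown only to illustrate the map lookup):

	var info AccountInfo
	if err := json.Unmarshal(body, &info); err != nil {
		return fmt.Errorf("decode account info: %w", err)
	}
	// Web service endpoints come back keyed by service name.
	if svc, ok := info.Webservices["drivews"]; ok {
		fmt.Println(svc.URL, svc.Status)
	}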
1174	backend/iclouddrive/iclouddrive.go	(new file)
File diff suppressed because it is too large

18	backend/iclouddrive/iclouddrive_test.go	(new file)
@@ -0,0 +1,18 @@
//go:build !plan9 && !solaris

package iclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/iclouddrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestICloudDrive:",
		NilObject:  (*iclouddrive.Object)(nil),
	})
}
7	backend/iclouddrive/iclouddrive_unsupported.go	(new file)
@@ -0,0 +1,7 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris

// Package iclouddrive implements the iCloud Drive backend
package iclouddrive
@@ -308,6 +308,12 @@ only useful for reading.
		Help: "The last status change time.",
	}},
},
{
	Name:     "hashes",
	Help:     `Comma separated list of supported checksum types.`,
	Default:  fs.CommaSepList{},
	Advanced: true,
},
{
	Name: config.ConfigEncoding,
	Help: config.ConfigEncodingHelp,

@@ -334,6 +340,7 @@ type Options struct {
	NoSparse     bool                 `config:"no_sparse"`
	NoSetModTime bool                 `config:"no_set_modtime"`
	TimeType     timeType             `config:"time_type"`
	Hashes       fs.CommaSepList      `config:"hashes"`
	Enc          encoder.MultiEncoder `config:"encoding"`
	NoClone      bool                 `config:"no_clone"`
}

@@ -1019,6 +1026,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	if len(f.opt.Hashes) > 0 {
		// Return only configured hashes.
		// Note: Could have used hash.SupportOnly to limit supported hashes for all hash related features.
		var supported hash.Set
		for _, hashName := range f.opt.Hashes {
			var ht hash.Type
			if err := ht.Set(hashName); err != nil {
				fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String())
			}
			supported.Add(ht)
		}
		return supported
	}
	return hash.Supported()
}
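A minimal sketch of what the hashes option does to the supported set, reusing the parsing loop above on an assumed value of `md5,sha1`:

	names := fs.CommaSepList{"md5", "sha1"}
	var supported hash.Set
	for _, name := range names {
		var ht hash.Type
		if err := ht.Set(name); err != nil {
			// Unknown names are logged and skipped rather than failing hard.
			fs.Infof(nil, "Invalid token %q", name)
			continue
		}
		supported.Add(ht)
	}
	// supported now contains exactly MD5 and SHA-1 instead of hash.Supported().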
@@ -6,6 +6,7 @@ import (
	"errors"
	"fmt"
	"net/http"
	"slices"
	"strings"
	"time"

@@ -14,7 +15,6 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/errcount"
	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

const (
@@ -1545,9 +1545,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
	if f.driveType == driveTypePersonal {
		return time.Millisecond
	}
	// While this is true for some OneDrive personal accounts, it
	// isn't true for all of them. See #8101 for details
	//
	// if f.driveType == driveTypePersonal {
	// 	return time.Millisecond
	// }
	return time.Second
}
@@ -1606,7 +1609,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")

@@ -1621,11 +1624,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fs.ErrorCantCopy
	}

	err := srcObj.readMetaData(ctx)
	err = srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

	// Find and remove existing object
	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
	if err != nil {
		return nil, err
	}
	defer cleanup(&err)

	// Check we aren't overwriting a file on the same remote
	if srcObj.fs == f {
		srcPath := srcObj.rootPath()
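The switch to named return values is what makes the deferred cleanup useful: it inspects the final err to decide whether to undo its work. A minimal sketch of the pattern with hypothetical helpers (prepare and doCopy are not real rclone functions):

	func copyWithUndo(ctx context.Context) (dst fs.Object, err error) {
		cleanup, err := prepare(ctx) // hypothetical setup returning an undo func
		if err != nil {
			return nil, err
		}
		// cleanup receives &err, so when this function returns it can see
		// whether the copy below succeeded and roll back if it did not.
		defer cleanup(&err)
		return doCopy(ctx) // hypothetical; sets dst and err before the defer runs
	}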
@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"slices"
	"testing"
	"time"

@@ -16,7 +17,6 @@ import (
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v

@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
	compareDirMeta(expectedMeta, actualMeta, false)

	// modtime
	assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
	// try changing it and re-check it
	newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
	assert.NoError(t, err)
	assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
	// ensure that f.DirSetModTime also works
	err = f.DirSetModTime(ctx, "subdir", t3)
	assert.NoError(t, err)

@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
	assert.NoError(t, err)
	entries.ForDir(func(dir fs.Directory) {
		if dir.Remote() == "subdir" {
			assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
			fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
		}
	})
@@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return dstObj, nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	var uInfo usersInfoResponse
	var resp *http.Response

	err = f.pacer.Call(func() (bool, error) {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/users/info.json/" + f.session.SessionID,
		}
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
		return f.shouldRetry(ctx, resp, err)
	})

	if err != nil {
		return nil, err
	}

	usage = &fs.Usage{
		Used:  fs.NewUsageValue(uInfo.StorageUsed),
		Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
		Free:  fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
	}
	return usage, nil
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.

@@ -1147,6 +1173,7 @@ var (
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.ParentIDer      = (*Object)(nil)
@@ -231,3 +231,10 @@ type permissions struct {
type uploadFileChunkReply struct {
	TotalWritten int64 `json:"TotalWritten"`
}

// usersInfoResponse describes OpenDrive users/info.json response
type usersInfoResponse struct {
	// This response contains many other values but these are the only ones currently in use
	StorageUsed int64 `json:"StorageUsed,string"`
	MaxStorage  int64 `json:"MaxStorage,string"`
}
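The `,string` option in those struct tags is doing real work: OpenDrive returns the quota numbers as quoted strings, and encoding/json unquotes them into int64. A runnable sketch with an illustrative payload:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type usersInfoResponse struct {
		StorageUsed int64 `json:"StorageUsed,string"`
		MaxStorage  int64 `json:"MaxStorage,string"`
	}

	func main() {
		payload := []byte(`{"StorageUsed":"1048576","MaxStorage":"5120"}`)
		var info usersInfoResponse
		if err := json.Unmarshal(payload, &info); err != nil {
			panic(err)
		}
		// Used bytes, and total bytes derived from MB the same way About does.
		fmt.Println(info.StorageUsed, info.MaxStorage*1024*1024)
	}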
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
	Sensitive: true,
}, {
	Name:      "compartment",
	Help:      "Object storage compartment OCID",
	Help:      "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
	Provider:  "!no_auth",
	Required:  true,
	Required:  false,
	Sensitive: true,
}, {
	Name: "region",
@@ -399,14 +399,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}
	if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
		return nil, fmt.Errorf("close file: %w", err)
	}

	writer := &writerAt{
		ctx:    ctx,
		client: client,
		fs:     f,
		size:   size,
		remote: remote,
		fd:     openResult.FileDescriptor,
		fileID: openResult.Fileid,
	}
@@ -18,21 +18,14 @@ import (
// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
	ctx    context.Context
	client *rest.Client
	fs     *Fs
	size   int64
	remote string
	fd     int64
	fileID int64
}

// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
	// close fd
	if _, err := c.fileClose(c.ctx); err != nil {
		return fmt.Errorf("close fd: %w", err)
	}

	// Avoiding race conditions: Depending on the tcp connection, there might be
	// caching issues when checking the size immediately after write.
	// Hence we try avoiding them by checking the resulting size on a different connection.

@@ -72,8 +65,18 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
	inSHA1Bytes := sha1.Sum(buffer)
	inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

	client, err := c.fs.newSingleConnClient(c.ctx)
	if err != nil {
		return 0, fmt.Errorf("create client: %w", err)
	}

	openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
	if err != nil {
		return 0, fmt.Errorf("open file: %w", err)
	}

	// get target hash
	outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
	outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
	if err != nil {
		return 0, err
	}

@@ -89,10 +92,15 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
	}

	// upload buffer with offset if necessary
	if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
	if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
		return 0, err
	}

	// close fd
	if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
		return contentLength, fmt.Errorf("close fd: %w", err)
	}

	return contentLength, nil
}

@@ -125,11 +133,40 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
	return result, nil
}

// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_open",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
	opts.Parameters.Set("flags", "0x0002") // O_WRITE

	result := &api.FileOpenResponse{}
	err := srcFs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("open new file descriptor: %w", err)
	}
	return result, nil
}

// Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func (c *writerAt) fileChecksum(
func fileChecksum(
	ctx context.Context,
	offset, count int64,
	client *rest.Client,
	pacer *fs.Pacer,
	fd, offset, count int64,
) (*api.FileChecksumResponse, error) {
	opts := rest.Opts{
		Method: "PUT",

@@ -140,26 +177,29 @@ func (c *writerAt) fileChecksum(
		"Connection": "keep-alive",
	},
}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
	opts.Parameters.Set("count", strconv.FormatInt(count, 10))

	result := &api.FileChecksumResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
	err := pacer.CallNoRetry(func() (bool, error) {
		resp, err := client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
	}
	return result, nil
}

// Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func (c *writerAt) filePWrite(
func filePWrite(
	ctx context.Context,
	client *rest.Client,
	pacer *fs.Pacer,
	fd int64,
	offset int64,
	buf []byte,
) (*api.FilePWriteResponse, error) {

@@ -176,24 +216,29 @@ func (c *writerAt) filePWrite(
		"Connection": "keep-alive",
	},
}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))

	result := &api.FilePWriteResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
	err := pacer.CallNoRetry(func() (bool, error) {
		resp, err := client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
	}
	return result, nil
}

// Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
func fileClose(
	ctx context.Context,
	client *rest.Client,
	pacer *fs.Pacer,
	fd int64,
) (*api.FileCloseResponse, error) {
	opts := rest.Opts{
		Method: "PUT",
		Path:   "/file_close",

@@ -201,11 +246,11 @@ func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error
	TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
	Close:            true,
}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))

	result := &api.FileCloseResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
	err := pacer.CallNoRetry(func() (bool, error) {
		resp, err := client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
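The checksum call that WriteAt issues before each pwrite acts as a dedupe guard: the chunk is only uploaded when the remote bytes at that offset differ from the buffer. A runnable toy illustration of that decision, with a byte slice standing in for the remote file (none of this is pcloud API):

	package main

	import (
		"bytes"
		"crypto/sha1"
		"fmt"
	)

	// writeIfChanged hashes the incoming buffer, compares it with what is
	// already stored at the offset, and only writes when they differ.
	func writeIfChanged(dst []byte, offset int, buf []byte) bool {
		existing := dst[offset : offset+len(buf)]
		if sha1.Sum(existing) == sha1.Sum(buf) {
			return false // remote already matches, skip the upload
		}
		copy(dst[offset:], buf)
		return true
	}

	func main() {
		file := bytes.Repeat([]byte{0}, 16)
		fmt.Println(writeIfChanged(file, 4, []byte("data"))) // true: written
		fmt.Println(writeIfChanged(file, 4, []byte("data"))) // false: skipped
	}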
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		// No root so return old f
		return f, nil
	}
	_, err := tempF.newObjectWithLink(ctx, remote, nil)
	_, err := tempF.newObject(ctx, remote)
	if err != nil {
		if err == fs.ErrorObjectNotFound {
			// File doesn't exist so return old f

@@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithLink(ctx, remote, nil)
	return f.newObject(ctx, remote)
}

func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {

@@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
	return link, nil
}

// readMetaDataForRemote reads the metadata from the remote
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
	link, err := f.getObjectLink(ctx, remote)
	if err != nil {
		return nil, nil, err
	}

// readMetaDataForLink reads the metadata from the remote
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
	var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
	var err error
	if err = f.pacer.Call(func() (bool, error) {
		fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
		return shouldRetry(ctx, err)
	}); err != nil {
		return nil, nil, err
		return nil, err
	}

	return link, fileSystemAttrs, nil
	return fileSystemAttrs, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
// Return an Object from a path and link
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
	if o.link != nil {
		return nil
	}

	link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
	if err != nil {
		return err
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}

	o.id = link.LinkID

@@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
	o.mimetype = link.MIMEType
	o.link = link

	fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
	if err != nil {
		return nil, err
	}
	if fileSystemAttrs != nil {
		o.modTime = fileSystemAttrs.ModificationTime
		o.originalSize = &fileSystemAttrs.Size

@@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
		o.digests = &fileSystemAttrs.Digests
	}

	return nil
	return o, nil
}

// Return an Object from a path
// Return an Object from a path only
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}

	err := o.readMetaData(ctx, link)
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
	link, err := f.getObjectLink(ctx, remote)
	if err != nil {
		return nil, err
	}
	return o, nil
	return f.newObjectWithLink(ctx, remote, link)
}

// List the objects and directories in dir into entries. The
@@ -572,6 +572,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
	if err != nil {
		return nil, err
	}

	// We have successfully copied the file to a random name
	// Check to see if the file already exists first and delete it if so
	existingObj, err := f.NewObject(ctx, remote)
	if err == nil {
		err = existingObj.Remove(ctx)
		if err != nil {
			return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
		}
	}

	err = f.pacer.Call(func() (bool, error) {
		params := url.Values{}
		params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
152	backend/s3/s3.go
@@ -136,6 +136,9 @@ var providerOption = fs.Option{
}, {
	Value: "Netease",
	Help:  "Netease Object Storage (NOS)",
}, {
	Value: "Outscale",
	Help:  "OUTSCALE Object Storage (OOS)",
}, {
	Value: "Petabox",
	Help:  "Petabox Object Storage",

@@ -151,6 +154,9 @@ var providerOption = fs.Option{
}, {
	Value: "SeaweedFS",
	Help:  "SeaweedFS S3",
}, {
	Value: "Selectel",
	Help:  "Selectel Object Storage",
}, {
	Value: "StackPath",
	Help:  "StackPath Object Storage",

@@ -488,6 +494,26 @@ func init() {
	Value: "eu-south-2",
	Help:  "Logrono, Spain",
}},
}, {
	Name:     "region",
	Help:     "Region where your bucket will be created and your data stored.\n",
	Provider: "Outscale",
	Examples: []fs.OptionExample{{
		Value: "eu-west-2",
		Help:  "Paris, France",
	}, {
		Value: "us-east-2",
		Help:  "New Jersey, USA",
	}, {
		Value: "us-west-1",
		Help:  "California, USA",
	}, {
		Value: "cloudgouv-eu-west-1",
		Help:  "SecNumCloud, Paris, France",
	}, {
		Value: "ap-northeast-1",
		Help:  "Tokyo, Japan",
	}},
}, {
	Name: "region",
	Help: "Region where your bucket will be created and your data stored.\n",

@@ -528,10 +554,19 @@ func init() {
	Value: "tw-001",
	Help:  "Asia (Taiwan)",
}},
}, {
	// See endpoints for object storage regions: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
	Name:     "region",
	Help:     "Region where your data is stored.\n",
	Provider: "Selectel",
	Examples: []fs.OptionExample{{
		Value: "ru-1",
		Help:  "St. Petersburg",
	}},
}, {
	Name: "region",
	Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
	Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
	Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
	Examples: []fs.OptionExample{{
		Value: "",
		Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",

@@ -1296,10 +1331,19 @@ func init() {
	Value: "s3-ap-northeast-1.qiniucs.com",
	Help:  "Northeast Asia Endpoint 1",
}},
}, {
	// Selectel endpoints: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
	Name:     "endpoint",
	Help:     "Endpoint for Selectel Object Storage.",
	Provider: "Selectel",
	Examples: []fs.OptionExample{{
		Value: "s3.ru-1.storage.selcloud.ru",
		Help:  "Saint Petersburg",
	}},
}, {
	Name: "endpoint",
	Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
	Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
	Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
	Examples: []fs.OptionExample{{
		Value: "objects-us-east-1.dream.io",
		Help:  "Dream Objects endpoint",

@@ -1344,6 +1388,26 @@ func init() {
	Value:    "s3.ap-southeast-1.lyvecloud.seagate.com",
	Help:     "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
	Provider: "LyveCloud",
}, {
	Value:    "oos.eu-west-2.outscale.com",
	Help:     "Outscale EU West 2 (Paris)",
	Provider: "Outscale",
}, {
	Value:    "oos.us-east-2.outscale.com",
	Help:     "Outscale US East 2 (New Jersey)",
	Provider: "Outscale",
}, {
	Value:    "oos.us-west-1.outscale.com",
	Help:     "Outscale US West 1 (California)",
	Provider: "Outscale",
}, {
	Value:    "oos.cloudgouv-eu-west-1.outscale.com",
	Help:     "Outscale SecNumCloud (Paris)",
	Provider: "Outscale",
}, {
	Value:    "oos.ap-northeast-1.outscale.com",
	Help:     "Outscale AP Northeast 1 (Japan)",
	Provider: "Outscale",
}, {
	Value: "s3.wasabisys.com",
	Help:  "Wasabi US East 1 (N. Virginia)",

@@ -1380,6 +1444,10 @@ func init() {
	Value:    "s3.eu-west-2.wasabisys.com",
	Help:     "Wasabi EU West 2 (Paris)",
	Provider: "Wasabi",
}, {
	Value:    "s3.eu-south-1.wasabisys.com",
	Help:     "Wasabi EU South 1 (Milan)",
	Provider: "Wasabi",
}, {
	Value: "s3.ap-northeast-1.wasabisys.com",
	Help:  "Wasabi AP Northeast 1 (Tokyo) endpoint",

@@ -1798,7 +1866,7 @@ func init() {
}, {
	Name: "location_constraint",
	Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
	Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
	Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
}, {
	Name: "acl",
	Help: `Canned ACL used when creating buckets and storing or copying objects.

@@ -1813,7 +1881,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
	Provider: "!Storj,Synology,Cloudflare",
	Provider: "!Storj,Selectel,Synology,Cloudflare",
	Examples: []fs.OptionExample{{
		Value: "default",
		Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",

@@ -2606,6 +2674,35 @@ knows about - please make a bug report if not.
`,
	Default:  fs.Tristate{},
	Advanced: true,
}, {
	Name: "directory_bucket",
	Help: strings.ReplaceAll(`Set to use AWS Directory Buckets

If you are using an AWS Directory Bucket then set this flag.

This will ensure no |Content-Md5| headers are sent and ensure |ETag|
headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
be set on all objects whether single or multipart uploaded.

This also sets |no_check_bucket = true|.

Note that Directory Buckets do not support:

- Versioning
- |Content-Encoding: gzip|

Rclone limitations with Directory Buckets:

- rclone does not support creating Directory Buckets with |rclone mkdir|
- ... or removing them with |rclone rmdir| yet
- Directory Buckets do not appear when doing |rclone lsf| at the top level.
- Rclone can't remove auto created directories yet. In theory this should
  work with |directory_markers = true| but it doesn't.
- Directories don't seem to appear in recursive (ListR) listings.
`, "|", "`"),
	Default:  false,
	Advanced: true,
	Provider: "AWS",
}, {
	Name: "sdk_log_mode",
	Help: strings.ReplaceAll(`Set to debug the SDK
@@ -2780,6 +2877,7 @@ type Options struct {
	UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
	UseUnsignedPayload  fs.Tristate `config:"use_unsigned_payload"`
	SDKLogMode          sdkLogMode  `config:"sdk_log_mode"`
	DirectoryBucket     bool        `config:"directory_bucket"`
}

// Fs represents a remote s3 server
@@ -3329,6 +3427,8 @@ func setQuirks(opt *Options) {
		urlEncodeListings = false
		useMultipartEtag = false // untested
		useAlreadyExists = false // untested
	case "Outscale":
		virtualHostStyle = false
	case "RackCorp":
		// No quirks
		useMultipartEtag = false // untested

@@ -3351,6 +3451,8 @@ func setQuirks(opt *Options) {
		}
		urlEncodeListings = true
		useAlreadyExists = true
	case "Selectel":
		urlEncodeListings = false
	case "SeaweedFS":
		listObjectsV2 = false // untested
		virtualHostStyle = false
@@ -3552,6 +3654,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		// MD5 digest of their object data.
		f.etagIsNotMD5 = true
	}
	if opt.DirectoryBucket {
		// Objects uploaded to directory buckets appear to have random ETags
		//
		// This doesn't appear to be documented
		f.etagIsNotMD5 = true
		// The normal API doesn't work for creating directory buckets, so don't try
		f.opt.NoCheckBucket = true
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType: true,
@@ -5756,6 +5866,25 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
	return resp.Body, err
}

// middleware to stop the SDK adding `Accept-Encoding: identity`
func removeDisableGzip() func(*middleware.Stack) error {
	return func(stack *middleware.Stack) error {
		_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
		return err
	}
}

// middleware to set Accept-Encoding to how we want it
//
// This makes sure we download compressed files as-is from all platforms
func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
	APIOptions = append(APIOptions, removeDisableGzip())
	if f.opt.UseAcceptEncodingGzip.Value {
		APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
	}
	return APIOptions
}
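A sketch of how the assembled APIOptions might be applied to a single SDK call; this assumes the standard aws-sdk-go-v2 per-operation option functions rather than anything specific to this patch:

	resp, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, func(opts *s3.Options) {
		// Every middleware mutator returned by acceptEncoding is applied
		// to this one call's middleware stack only.
		opts.APIOptions = append(opts.APIOptions, f.acceptEncoding()...)
	})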
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bucket, bucketPath := o.split()

@@ -5789,11 +5918,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

	var APIOptions []func(*middleware.Stack) error

	// Override the automatic decompression in the transport to
	// download compressed files as-is
	if o.fs.opt.UseAcceptEncodingGzip.Value {
		APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
	}
	// Set the SDK to always download compressed files as-is
	APIOptions = append(APIOptions, o.fs.acceptEncoding()...)

	for _, option := range options {
		switch option.(type) {

@@ -6033,6 +6159,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
		SSECustomerKey:    w.multiPartUploadInput.SSECustomerKey,
		SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
	}
	if w.f.opt.DirectoryBucket {
		// Directory buckets do not support "Content-Md5" header
		uploadPartReq.ContentMD5 = nil
	}
	var uout *s3.UploadPartOutput
	err = w.f.pacer.Call(func() (bool, error) {
		// rewind the reader on retry and after reading md5

@@ -6309,7 +6439,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
	if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
		// Set the md5sum as metadata on the object if
		// - a multipart upload
		// - the Etag is not an MD5, eg when using SSE/SSE-C
		// - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
		// provided checksums aren't disabled
		ui.req.Metadata[metaMD5Hash] = md5sumBase64
	}

@@ -6324,7 +6454,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
	if size >= 0 {
		ui.req.ContentLength = &size
	}
	if md5sumBase64 != "" {
	if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
		ui.req.ContentMD5 = &md5sumBase64
	}
	if o.fs.opt.RequesterPays {
@@ -23,14 +23,20 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
	opt := &fstests.Opt{
		RemoteName:  "TestS3:",
		NilObject:   (*Object)(nil),
		TiersToTest: []string{"STANDARD", "STANDARD_IA"},
		TiersToTest: []string{"STANDARD"},
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
	}
	// Test wider range of tiers on AWS
	if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
		opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
	}
	fstests.Run(t, opt)
}

func TestIntegration2(t *testing.T) {
@@ -99,6 +99,11 @@ Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
	IsPassword: true,
	Sensitive:  true,
}, {
	Name: "pubkey",
	Help: `SSH public certificate for public certificate based authentication.
Set this if you have a signed certificate you want to use for authentication.
If specified will override pubkey_file.`,
}, {
	Name: "pubkey_file",
	Help: `Optional path to public key file.

@@ -216,15 +221,45 @@ E.g. the second example above should be rewritten as:
		Help: "Windows Command Prompt",
	},
},
}, {
	Name:     "hashes",
	Help:     `Comma separated list of supported checksum types.`,
	Default:  fs.CommaSepList{},
	Advanced: true,
}, {
	Name:    "md5sum_command",
	Default: "",
	Help:    "The command used to read md5 hashes.\n\nLeave blank for autodetect.",
	Help:    "The command used to read MD5 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:    "sha1sum_command",
	Default: "",
	Help:    "The command used to read sha1 hashes.\n\nLeave blank for autodetect.",
	Help:    "The command used to read SHA-1 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:     "crc32sum_command",
	Default:  "",
	Help:     "The command used to read CRC-32 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:     "sha256sum_command",
	Default:  "",
	Help:     "The command used to read SHA-256 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:     "blake3sum_command",
	Default:  "",
	Help:     "The command used to read BLAKE3 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:     "xxh3sum_command",
	Default:  "",
	Help:     "The command used to read XXH3 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name:     "xxh128sum_command",
	Default:  "",
	Help:     "The command used to read XXH128 hashes.\n\nLeave blank for autodetect.",
	Advanced: true,
}, {
	Name: "skip_links",

@@ -511,6 +546,7 @@ type Options struct {
	KeyPem         string `config:"key_pem"`
	KeyFile        string `config:"key_file"`
	KeyFilePass    string `config:"key_file_pass"`
	PubKey         string `config:"pubkey"`
	PubKeyFile     string `config:"pubkey_file"`
	KnownHostsFile string `config:"known_hosts_file"`
	KeyUseAgent    bool   `config:"key_use_agent"`

@@ -520,8 +556,14 @@ type Options struct {
	PathOverride     string          `config:"path_override"`
	SetModTime       bool            `config:"set_modtime"`
	ShellType        string          `config:"shell_type"`
	Hashes           fs.CommaSepList `config:"hashes"`
	Md5sumCommand    string          `config:"md5sum_command"`
	Sha1sumCommand   string          `config:"sha1sum_command"`
	Crc32sumCommand  string          `config:"crc32sum_command"`
	Sha256sumCommand string          `config:"sha256sum_command"`
	Blake3sumCommand string          `config:"blake3sum_command"`
	Xxh3sumCommand   string          `config:"xxh3sum_command"`
	Xxh128sumCommand string          `config:"xxh128sum_command"`
	SkipLinks        bool            `config:"skip_links"`
	Subsystem        string          `config:"subsystem"`
	ServerCommand    string          `config:"server_command"`
|
|||
|
||||
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64 // size of the object
|
||||
modTime uint32 // modification time of the object as unix time
|
||||
mode os.FileMode // mode bits from the file
|
||||
md5sum *string // Cached MD5 checksum
|
||||
sha1sum *string // Cached SHA1 checksum
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64 // size of the object
|
||||
modTime uint32 // modification time of the object as unix time
|
||||
mode os.FileMode // mode bits from the file
|
||||
md5sum *string // Cached MD5 checksum
|
||||
sha1sum *string // Cached SHA-1 checksum
|
||||
crc32sum *string // Cached CRC-32 checksum
|
||||
sha256sum *string // Cached SHA-256 checksum
|
||||
blake3sum *string // Cached BLAKE3 checksum
|
||||
xxh3sum *string // Cached XXH3 checksum
|
||||
xxh128sum *string // Cached XXH128 checksum
|
||||
}
|
||||
|
||||
// conn encapsulates an ssh client and corresponding sftp client
|
||||
|
@@ -997,13 +1044,21 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	// If a public key has been specified then use that
	if pubkeyFile != "" {
		certfile, err := os.ReadFile(pubkeyFile)
		if err != nil {
			return nil, fmt.Errorf("unable to read cert file: %w", err)
	if pubkeyFile != "" || opt.PubKey != "" {
		pubKeyRaw := []byte(opt.PubKey)
		// Use this error if public key is provided inline and is not a certificate
		// if public key file is provided instead, use the err in the if block
		notACertError := errors.New("public key provided is not a certificate: " + opt.PubKey)
		if opt.PubKey == "" {
			notACertError = errors.New("public key file is not a certificate file: " + pubkeyFile)
			err := error(nil)
			pubKeyRaw, err = os.ReadFile(pubkeyFile)
			if err != nil {
				return nil, fmt.Errorf("unable to read cert file: %w", err)
			}
		}

		pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
		pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
		if err != nil {
			return nil, fmt.Errorf("unable to parse cert file: %w", err)
		}

@@ -1017,7 +1072,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		// knows everything it needs.
		cert, ok := pk.(*ssh.Certificate)
		if !ok {
			return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
			return nil, notACertError
		}
		pubsigner, err := ssh.NewCertSigner(cert, signer)
		if err != nil {
@@ -1609,14 +1664,113 @@ func (f *Fs) Hashes() hash.Set {
		return *f.cachedHashes
	}

	hashSet := hash.NewHashSet()
	f.cachedHashes = &hashSet
	hashTypesSupported := hash.NewHashSet()
	f.cachedHashes = &hashTypesSupported

	if f.opt.DisableHashCheck || f.shellType == shellTypeNotSupported {
		return hashSet
		return hashTypesSupported
	}

	hashTypes := hash.NewHashSet()
	if len(f.opt.Hashes) > 0 {
		for _, hashName := range f.opt.Hashes {
			var hashType hash.Type
			if err := hashType.Set(hashName); err != nil {
				fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String())
			}
			hashTypes.Add(hashType)
		}
	} else {
		hashTypes.Add(hash.MD5, hash.SHA1)
	}

	hashCommands := map[hash.Type]struct {
		option       *string
		emptyHash    string
		hashCommands []struct{ hashFile, hashEmpty string }
	}{
		hash.MD5: {
			&f.opt.Md5sumCommand,
			"d41d8cd98f00b204e9800998ecf8427e",
			[]struct{ hashFile, hashEmpty string }{
				{"md5sum", "md5sum"},
				{"md5 -r", "md5 -r"},
				{"rclone md5sum", "rclone md5sum"},
			},
		},
		hash.SHA1: {
			&f.opt.Sha1sumCommand,
			"da39a3ee5e6b4b0d3255bfef95601890afd80709",
			[]struct{ hashFile, hashEmpty string }{
				{"sha1sum", "sha1sum"},
				{"sha1 -r", "sha1 -r"},
				{"rclone sha1sum", "rclone sha1sum"},
			},
		},
		hash.CRC32: {
			&f.opt.Crc32sumCommand,
			"00000000",
			[]struct{ hashFile, hashEmpty string }{
				{"crc32", "crc32"},
				{"rclone hashsum crc32", "rclone hashsum crc32"},
			},
		},
		hash.SHA256: {
			&f.opt.Sha256sumCommand,
			"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			[]struct{ hashFile, hashEmpty string }{
				{"sha256sum", "sha256sum"},
				{"sha256 -r", "sha256 -r"},
				{"rclone hashsum sha256", "rclone hashsum sha256"},
			},
		},
		hash.BLAKE3: {
			&f.opt.Blake3sumCommand,
			"af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262",
			[]struct{ hashFile, hashEmpty string }{
				{"b3sum", "b3sum"},
				{"rclone hashsum blake3", "rclone hashsum blake3"},
			},
		},
		hash.XXH3: {
			&f.opt.Xxh3sumCommand,
			"2d06800538d394c2",
			[]struct{ hashFile, hashEmpty string }{
				// The xxhsum tool uses an alternative BSD style output format for the 64-bit variant of XXH3,
				// otherwise optional with argument --tag, to avoid confusion with the older 64-bit algorithm XXH64.
				// For the same reason there is no algorithm-specific alias, xxh3sum, either. We are currently not able
				// to parse this output format. The next release of xxHash after 0.8.2 will change to GNU style, classic
				// md5sum, output format, but will use a non-standard prefix "XXH3_" preceding the hash, so we still
				// need additional changes to be able to support it.
				//{"xxh3sum", "xxh3sum"},
				//{"xxhsum -H3", "xxhsum -H3"},
				{"rclone hashsum xxh3", "rclone hashsum xxh3"},
			},
		},
		hash.XXH128: {
			&f.opt.Xxh128sumCommand,
			"99aa06d3014798d86001c324468d497f",
			[]struct{ hashFile, hashEmpty string }{
				{"xxh128sum", "xxh128sum"},
				{"xxhsum -H2", "xxhsum -H2"},
				{"rclone hashsum xxh128", "rclone hashsum xxh128"},
			},
		},
	}
	if f.shellType == "powershell" {
		for _, hashType := range []hash.Type{hash.MD5, hash.SHA1, hash.SHA256} {
			if entry, ok := hashCommands[hashType]; ok {
				entry.hashCommands = append(hashCommands[hashType].hashCommands, struct {
					hashFile, hashEmpty string
				}{
					fmt.Sprintf("&{param($Path);Get-FileHash -Algorithm %v -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}", hashType),
					fmt.Sprintf("Get-FileHash -Algorithm %v -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}", hashType),
				})
				hashCommands[hashType] = entry
			}
		}
	}

	// look for a hash command which works
	checkHash := func(hashType hash.Type, commands []struct{ hashFile, hashEmpty string }, expected string, hashCommand *string, changed *bool) bool {
		if *hashCommand == hashCommandNotSupported {
			return false

@@ -1645,55 +1799,25 @@ func (f *Fs) Hashes() hash.Set {
	}

	changed := false
	md5Commands := []struct {
		hashFile, hashEmpty string
	}{
		{"md5sum", "md5sum"},
		{"md5 -r", "md5 -r"},
		{"rclone md5sum", "rclone md5sum"},
	for _, hashType := range hashTypes.Array() {
		if entry, ok := hashCommands[hashType]; ok {
			if works := checkHash(hashType, entry.hashCommands, entry.emptyHash, entry.option, &changed); works {
				hashTypesSupported.Add(hashType)
			}
		}
	}
	sha1Commands := []struct {
		hashFile, hashEmpty string
	}{
		{"sha1sum", "sha1sum"},
		{"sha1 -r", "sha1 -r"},
		{"rclone sha1sum", "rclone sha1sum"},
	}
	if f.shellType == "powershell" {
		md5Commands = append(md5Commands, struct {
			hashFile, hashEmpty string
		}{
			"&{param($Path);Get-FileHash -Algorithm MD5 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
			"Get-FileHash -Algorithm MD5 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
		})

		sha1Commands = append(sha1Commands, struct {
			hashFile, hashEmpty string
		}{
			"&{param($Path);Get-FileHash -Algorithm SHA1 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
			"Get-FileHash -Algorithm SHA1 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
		})
	}

	md5Works := checkHash(hash.MD5, md5Commands, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
	sha1Works := checkHash(hash.SHA1, sha1Commands, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)

	if changed {
		// Save permanently in config to avoid the extra work next time
		fs.Debugf(f, "Setting hash command for %v to %q (set md5sum_command to override)", hash.MD5, f.opt.Md5sumCommand)
		f.m.Set("md5sum_command", f.opt.Md5sumCommand)
		fs.Debugf(f, "Setting hash command for %v to %q (set sha1sum_command to override)", hash.SHA1, f.opt.Sha1sumCommand)
		f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
		for _, hashType := range hashTypes.Array() {
			if entry, ok := hashCommands[hashType]; ok {
				fs.Debugf(f, "Setting hash command for %v to %q (set %vsum_command to override)", hashType, *entry.option, hashType)
				f.m.Set(fmt.Sprintf("%vsum_command", hashType), *entry.option)
			}
		}
	}

	if sha1Works {
		hashSet.Add(hash.SHA1)
	}
	if md5Works {
		hashSet.Add(hash.MD5)
	}

	return hashSet
	return hashTypesSupported
}

// About gets usage stats
@@ -1828,17 +1952,43 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	_ = o.fs.Hashes()

	var hashCmd string
	if r == hash.MD5 {
	switch r {
	case hash.MD5:
		if o.md5sum != nil {
			return *o.md5sum, nil
		}
		hashCmd = o.fs.opt.Md5sumCommand
	} else if r == hash.SHA1 {
	case hash.SHA1:
		if o.sha1sum != nil {
			return *o.sha1sum, nil
		}
		hashCmd = o.fs.opt.Sha1sumCommand
	} else {
	case hash.CRC32:
		if o.crc32sum != nil {
			return *o.crc32sum, nil
		}
		hashCmd = o.fs.opt.Crc32sumCommand
	case hash.SHA256:
		if o.sha256sum != nil {
			return *o.sha256sum, nil
		}
		hashCmd = o.fs.opt.Sha256sumCommand
	case hash.BLAKE3:
		if o.blake3sum != nil {
			return *o.blake3sum, nil
		}
		hashCmd = o.fs.opt.Blake3sumCommand
	case hash.XXH3:
		if o.xxh3sum != nil {
			return *o.xxh3sum, nil
		}
		hashCmd = o.fs.opt.Xxh3sumCommand
	case hash.XXH128:
		if o.xxh128sum != nil {
			return *o.xxh128sum, nil
		}
		hashCmd = o.fs.opt.Xxh128sumCommand
	default:
		return "", hash.ErrUnsupported
	}
	if hashCmd == "" || hashCmd == hashCommandNotSupported {

@@ -1855,10 +2005,21 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	}
	hashString := parseHash(outBytes)
	fs.Debugf(o, "Parsed hash: %s", hashString)
	if r == hash.MD5 {
	switch r {
	case hash.MD5:
		o.md5sum = &hashString
	} else if r == hash.SHA1 {
	case hash.SHA1:
		o.sha1sum = &hashString
	case hash.CRC32:
		o.crc32sum = &hashString
	case hash.SHA256:
		o.sha256sum = &hashString
	case hash.BLAKE3:
		o.blake3sum = &hashString
	case hash.XXH3:
		o.xxh3sum = &hashString
	case hash.XXH128:
		o.xxh128sum = &hashString
	}
	return hashString, nil
}
@@ -1923,7 +2084,7 @@ func (f *Fs) remoteShellPath(remote string) string {
}

// Converts a byte array from the SSH session returned by
// an invocation of md5sum/sha1sum to a hash string
// an invocation of a hash command to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
	// For strings with backslash *sum writes a leading \
@@ -2087,10 +2248,10 @@ func (file *objectReader) Read(p []byte) (n int, err error) {

// Close a reader of a remote sftp file
func (file *objectReader) Close() (err error) {
	// Close the sftpFile - this will likely cause the WriteTo to error
	err = file.sftpFile.Close()
	// Close the pipeReader so writes to the pipeWriter fail
	_ = file.pipeReader.Close()
	// Close the sftpFile - this will likely cause the WriteTo to error
	err = file.sftpFile.Close()
	// Wait for the background process to finish
	<-file.done
	// Show connection no longer in use
@@ -2152,6 +2313,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// Clear the hash cache since we are about to update the object
 	o.md5sum = nil
 	o.sha1sum = nil
+	o.crc32sum = nil
+	o.sha256sum = nil
+	o.blake3sum = nil
+	o.xxh3sum = nil
+	o.xxh128sum = nil
 	c, err := o.fs.getSftpConnection(ctx)
 	if err != nil {
 		return fmt.Errorf("Update: %w", err)
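
The sftp hunks above follow one pattern throughout: each checksum is cached on the Object behind a *string, where nil means "not computed yet", and Update clears every pointer so a stale sum can never be served after a rewrite. A minimal, self-contained sketch of that memoization pattern (hypothetical type names, not the rclone ones):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// object caches a lazily computed checksum; nil means "not computed yet".
type object struct {
	data   []byte
	md5sum *string
}

// Hash returns the cached checksum, computing it on first use.
func (o *object) Hash() string {
	if o.md5sum != nil {
		return *o.md5sum
	}
	sum := md5.Sum(o.data)
	s := hex.EncodeToString(sum[:])
	o.md5sum = &s // cache for subsequent calls
	return s
}

// Update replaces the content and invalidates the cache, mirroring the
// o.md5sum = nil / o.sha1sum = nil ... lines in the hunk above.
func (o *object) Update(data []byte) {
	o.data = data
	o.md5sum = nil
}

func main() {
	o := &object{data: []byte("hello")}
	fmt.Println(o.Hash()) // computed
	fmt.Println(o.Hash()) // served from cache
	o.Update([]byte("world"))
	fmt.Println(o.Hash()) // recomputed after invalidation
}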
@@ -35,6 +35,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
@@ -867,13 +868,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	err := srcObj.readMetaData(ctx)
+	err = srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -890,6 +891,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}
 
+	// Find and remove existing object
+	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
+	if err != nil {
+		return nil, err
+	}
+	defer cleanup(&err)
+
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
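
The Copy change above switches to named return values (dst fs.Object, err error) precisely so that defer cleanup(&err) can see the final error, and it removes any existing destination object before the server-side copy. A standalone sketch of that remove-then-copy shape with a deferred, error-aware cleanup (the map-backed store and removeExisting helper are illustrative stand-ins, not rclone's operations.RemoveExisting):

package main

import (
	"errors"
	"fmt"
)

// removeExisting moves any existing destination out of the way and returns
// a cleanup which restores it if the operation that follows fails.
func removeExisting(store map[string]string, name string) (cleanup func(*error), err error) {
	backup, had := store[name]
	delete(store, name)
	return func(perr *error) {
		if *perr != nil && had {
			store[name] = backup // copy failed: put the previous object back
		}
	}, nil
}

func copyObject(store map[string]string, src, dst string) (err error) {
	cleanup, err := removeExisting(store, dst)
	if err != nil {
		return err
	}
	defer cleanup(&err) // reads the final named err, like defer cleanup(&err) above

	v, ok := store[src]
	if !ok {
		return errors.New("source not found")
	}
	store[dst] = v
	return nil
}

func main() {
	store := map[string]string{"a": "1", "b": "old"}
	fmt.Println(copyObject(store, "a", "b"), store)       // succeeds, "b" overwritten
	fmt.Println(copyObject(store, "missing", "b"), store) // fails, previous "b" restored
}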
@@ -14,21 +14,30 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 )
 
-func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
 	// Not found. Can be returned by NextCloud when merging chunks of an upload.
 	if resp != nil && resp.StatusCode == 404 {
+		if *wasLocked {
+			// Assume a 404 error after we've received a 423 error is actually a success
+			return false, nil
+		}
 		return true, err
 	}
 
 	// 423 LOCKED
 	if resp != nil && resp.StatusCode == 423 {
-		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
+		*wasLocked = true
+		fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
+		time.Sleep(*sleepTime)
+		*sleepTime *= 2
+		return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
 	}
 
 	return f.shouldRetry(ctx, resp, err)
@@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
 	}
 	opts.ExtraHeaders = o.extraHeaders(ctx, src)
 	opts.ExtraHeaders["Destination"] = destinationURL.String()
+	sleepTime := 5 * time.Second
+	wasLocked := false
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
+		return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
 	})
 	if err != nil {
 		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
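
The retry logic above turns a 423 LOCKED during chunk merging into a sleep-and-double backoff, and treats a 404 that follows a 423 as success (the upload directory is gone because the merge finished). A compressed sketch of the same state machine, with a plain retry loop standing in for rclone's pacer and millisecond sleeps standing in for the real 5-second starting delay:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// callWithBackoff retries fn while it reports 423 LOCKED, doubling the
// sleep each attempt, and treats a 404 after a 423 as success.
func callWithBackoff(fn func() (int, error), maxTries int) error {
	sleepTime := 5 * time.Millisecond // 5 * time.Second in the backend; shortened here
	wasLocked := false
	for try := 0; try < maxTries; try++ {
		status, err := fn()
		switch {
		case status == http.StatusLocked: // 423: merge still running server side
			wasLocked = true
			fmt.Printf("sleeping %v waiting for merge\n", sleepTime)
			time.Sleep(sleepTime)
			sleepTime *= 2 // exponential backoff, as in the hunk above
		case status == http.StatusNotFound && wasLocked:
			return nil // upload dir already merged and removed: success
		case status == http.StatusNotFound:
			continue // plain 404: retry, as the hunk returns (true, err)
		case err != nil:
			return err
		default:
			return nil
		}
	}
	return errors.New("gave up waiting for chunk merge")
}

func main() {
	responses := []int{423, 423, 404} // simulated server behaviour
	i := 0
	err := callWithBackoff(func() (int, error) {
		status := responses[i]
		i++
		return status, nil
	}, 5)
	fmt.Println("result:", err)
}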
@@ -22,6 +22,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
@@ -713,7 +714,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -721,12 +722,21 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	dstPath := f.filePath(remote)
-	err := f.mkParentDirs(ctx, dstPath)
+	err = f.mkParentDirs(ctx, dstPath)
 	if err != nil {
 		return nil, err
 	}
-	err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
+
+	// Find and remove existing object
+	//
+	// Note that the overwrite flag doesn't seem to work for server side copy
+	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
+	if err != nil {
+		return nil, err
+	}
+	defer cleanup(&err)
+
+	err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't copy file: %w", err)
 	}
@@ -27,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// User is a Zoho user we are only interested in the ZUID here
-type User struct {
+// OAuthUser is a Zoho user we are only interested in the ZUID here
+type OAuthUser struct {
 	FirstName string `json:"First_Name"`
 	Email     string `json:"Email"`
 	LastName  string `json:"Last_Name"`
@@ -36,12 +36,41 @@ type User struct {
 	ZUID int64 `json:"ZUID"`
 }
 
-// TeamWorkspace represents a Zoho Team or workspace
+// UserInfoResponse is returned by the user info API.
+type UserInfoResponse struct {
+	Data struct {
+		ID         string `json:"id"`
+		Type       string `json:"users"`
+		Attributes struct {
+			EmailID string `json:"email_id"`
+			Edition string `json:"edition"`
+		} `json:"attributes"`
+	} `json:"data"`
+}
+
+// PrivateSpaceInfo gives basic information about a users private folder.
+type PrivateSpaceInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	} `json:"data"`
+}
+
+// CurrentTeamInfo gives information about the current user in a team.
+type CurrentTeamInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	}
+}
+
+// TeamWorkspace represents a Zoho Team, Workspace or Private Space
 // It's actually a VERY large json object that differs between
-// Team and Workspace but we are only interested in some fields
-// that both of them have so we can use the same struct for both
+// Team and Workspace and Private Space but we are only interested in some fields
+// that all of them have so we can use the same struct.
 type TeamWorkspace struct {
 	ID   string `json:"id"`
 	Type string `json:"type"`
 	Attributes struct {
 		Name    string `json:"name"`
 		Created Time   `json:"created_time_in_millisecond"`
@@ -49,7 +78,8 @@ type TeamWorkspace struct {
 	} `json:"attributes"`
 }
 
-// TeamWorkspaceResponse is the response by the list teams api
+// TeamWorkspaceResponse is the response by the list teams API, list workspace API
+// or list team private spaces API.
 type TeamWorkspaceResponse struct {
 	TeamWorkspace []TeamWorkspace `json:"data"`
 }
@@ -180,11 +210,38 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
 	return &ufi, nil
 }
 
+// LargeUploadInfo is once again a slightly different version of UploadInfo
+// returned as part of an LargeUploadResponse by the large file upload API.
+type LargeUploadInfo struct {
+	Attributes struct {
+		ParentID    string `json:"parent_id"`
+		FileName    string `json:"file_name"`
+		RessourceID string `json:"resource_id"`
+		FileInfo    string `json:"file_info"`
+	} `json:"attributes"`
+}
+
+// GetUploadFileInfo decodes the embedded FileInfo
+func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
+	var ufi UploadFileInfo
+	err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
+	}
+	return &ufi, nil
+}
+
 // UploadResponse is the response to a file Upload
 type UploadResponse struct {
 	Uploads []UploadInfo `json:"data"`
 }
 
+// LargeUploadResponse is the response returned by large file upload API.
+type LargeUploadResponse struct {
+	Uploads []LargeUploadInfo `json:"data"`
+	Status  string            `json:"status"`
+}
+
 // WriteMetadataRequest is used to write metadata for a
 // single item
 type WriteMetadataRequest struct {
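
Both UploadInfo variants above carry FileInfo as a JSON document embedded inside a string field, so decoding takes two json.Unmarshal passes: one for the envelope and one for the embedded string. A small sketch of that double decode (field names chosen to mirror the types above, the payload is invented):

package main

import (
	"encoding/json"
	"fmt"
)

// Response mirrors the shape used above: attributes carry a file_info
// field that is itself a JSON document encoded as a string.
type Response struct {
	Attributes struct {
		FileName string `json:"file_name"`
		FileInfo string `json:"file_info"` // JSON-in-a-string
	} `json:"attributes"`
}

type FileInfo struct {
	Size int64 `json:"size"`
}

func main() {
	raw := `{"attributes":{"file_name":"a.txt","file_info":"{\"size\":42}"}}`
	var r Response
	if err := json.Unmarshal([]byte(raw), &r); err != nil {
		panic(err)
	}
	// Second unmarshal to decode the embedded document, as
	// GetUploadFileInfo does above.
	var fi FileInfo
	if err := json.Unmarshal([]byte(r.Attributes.FileInfo), &fi); err != nil {
		panic(err)
	}
	fmt.Println(r.Attributes.FileName, fi.Size)
}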
@@ -14,6 +14,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
@@ -36,9 +37,11 @@ const (
 	rcloneClientID              = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
 	rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
 	minSleep                    = 10 * time.Millisecond
-	maxSleep                    = 2 * time.Second
+	maxSleep                    = 60 * time.Second
 	decayConstant               = 2 // bigger for slower decay, exponential
 	configRootID                = "root_folder_id"
+
+	defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
 )
 
 // Globals
@@ -50,6 +53,7 @@ var (
 		"WorkDrive.team.READ",
 		"WorkDrive.workspace.READ",
 		"WorkDrive.files.ALL",
+		"ZohoFiles.files.ALL",
 	},
 	Endpoint: oauth2.Endpoint{
 		AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
@@ -61,6 +65,8 @@ var (
 	RedirectURL: oauthutil.RedirectLocalhostURL,
 	}
 	rootURL     = "https://workdrive.zoho.eu/api/v1"
+	downloadURL = "https://download.zoho.eu/v1/workdrive"
+	uploadURL   = "http://upload.zoho.eu/workdrive-api/v1/"
 	accountsURL = "https://accounts.zoho.eu"
 )
 
@@ -79,7 +85,7 @@ func init() {
 		getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
 			oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 			if err != nil {
-				return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
+				return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
 			}
 			authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
 			apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
@@ -88,12 +94,12 @@ func init() {
 
 		switch config.State {
 		case "":
-			return oauthutil.ConfigOut("teams", &oauthutil.Options{
+			return oauthutil.ConfigOut("type", &oauthutil.Options{
 				OAuth2Config: oauthConfig,
 				// No refresh token unless ApprovalForce is set
 				OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
 			})
-		case "teams":
+		case "type":
 			// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
 			// it's own custom type
 			token, err := oauthutil.GetToken(name, m)
@@ -108,24 +114,43 @@ func init() {
 			}
 		}
 
-		authSrv, apiSrv, err := getSrvs()
+		_, apiSrv, err := getSrvs()
 		if err != nil {
 			return nil, err
 		}
 
-		// Get the user Info
-		opts := rest.Opts{
-			Method: "GET",
-			Path:   "/oauth/user/info",
-		}
-		var user api.User
-		_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
+		userInfo, err := getUserInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		// If personal Edition only one private Space is available. Directly configure that.
+		if userInfo.Data.Attributes.Edition == "PERSONAL" {
+			return fs.ConfigResult("private_space", userInfo.Data.ID)
+		}
+		// Otherwise go to team selection
+		return fs.ConfigResult("team", userInfo.Data.ID)
+	case "private_space":
+		_, apiSrv, err := getSrvs()
+		if err != nil {
+			return nil, err
+		}
+
+		workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
+			workspace := workspaces[i]
+			return workspace.ID, workspace.Name
+		})
+	case "team":
+		_, apiSrv, err := getSrvs()
+		if err != nil {
+			return nil, err
+		}
 
 		// Get the teams
-		teams, err := listTeams(ctx, user.ZUID, apiSrv)
+		teams, err := listTeams(ctx, config.Result, apiSrv)
 		if err != nil {
 			return nil, err
 		}
@@ -143,9 +168,19 @@ func init() {
 		if err != nil {
 			return nil, err
 		}
+		currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		workspaces = append(workspaces, privateSpaces...)
 
 		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
 			workspace := workspaces[i]
-			return workspace.ID, workspace.Attributes.Name
+			return workspace.ID, workspace.Name
 		})
 	case "workspace_end":
 		workspaceID := config.Result
@@ -179,7 +214,13 @@ browser.`,
 		}, {
 			Value: "com.au",
 			Help:  "Australia",
-		}}}, {
+		}},
+	}, {
+		Name:     "upload_cutoff",
+		Help:     "Cutoff for switching to large file upload api (>= 10 MiB).",
+		Default:  fs.SizeSuffix(defaultUploadCutoff),
+		Advanced: true,
+	}, {
 		Name:     config.ConfigEncoding,
 		Help:     config.ConfigEncodingHelp,
 		Advanced: true,
@@ -193,6 +234,7 @@ browser.`,
 
 // Options defines the configuration for this backend
 type Options struct {
+	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
 	RootFolderID string               `config:"root_folder_id"`
 	Region       string               `config:"region"`
 	Enc          encoder.MultiEncoder `config:"encoding"`
@@ -200,13 +242,15 @@ type Options struct {
 
 // Fs represents a remote workdrive
 type Fs struct {
-	name     string             // name of this remote
-	root     string             // the path we are working on
-	opt      Options            // parsed options
-	features *fs.Features       // optional features
-	srv      *rest.Client       // the connection to the server
-	dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer    *fs.Pacer          // pacer for API calls
+	name        string             // name of this remote
+	root        string             // the path we are working on
+	opt         Options            // parsed options
+	features    *fs.Features       // optional features
+	srv         *rest.Client       // the connection to the server
+	downloadsrv *rest.Client       // the connection to the download server
+	uploadsrv   *rest.Client       // the connection to the upload server
+	dirCache    *dircache.DirCache // Map of directory path to directory id
+	pacer       *fs.Pacer          // pacer for API calls
 }
 
 // Object describes a Zoho WorkDrive object
@@ -229,6 +273,8 @@ func setupRegion(m configmap.Mapper) error {
 		return errors.New("no region set")
 	}
 	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
+	downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
+	uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
 	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
 	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
 	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
@@ -237,11 +283,63 @@ func setupRegion(m configmap.Mapper) error {
 
 // ------------------------------------------------------------
 
-func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
+type workspaceInfo struct {
+	ID   string
+	Name string
+}
+
+func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
+	var userInfo api.UserInfoResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/me",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &userInfo, nil
+}
+
+func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
+	var currentTeamInfo api.CurrentTeamInfo
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/teams/" + teamID + "/currentuser",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &currentTeamInfo, err
+}
+
+func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var privateSpaceListResponse api.TeamWorkspaceResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/" + teamUserID + "/privatespace",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
+	for _, workspace := range privateSpaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
+	}
+	return workspaceList, err
+}
+
+func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
 	var teamList api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
-		Path:         "/users/" + strconv.FormatInt(uid, 10) + "/teams",
+		Path:         "/users/" + zuid + "/teams",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
 	_, err := srv.CallJSON(ctx, &opts, nil, &teamList)
@@ -251,18 +349,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWork
 	return teamList.TeamWorkspace, nil
 }
 
-func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
-	var workspaceList api.TeamWorkspaceResponse
+func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var workspaceListResponse api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
 		Path:         "/teams/" + teamID + "/workspaces",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
-	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
+	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
 	if err != nil {
 		return nil, err
 	}
-	return workspaceList.TeamWorkspace, nil
+
+	workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
+	for _, workspace := range workspaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
+	}
+
+	return workspaceList, nil
}
 
 // --------------------------------------------------------------
@@ -285,13 +389,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 	}
 	authRetry := false
 
+	// Bail out early if we are missing OAuth Scopes.
+	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
+		fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
+		return false, err
+	}
+
 	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
 	if resp != nil && resp.StatusCode == 429 {
-		fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
-		time.Sleep(60 * time.Second)
+		err = pacer.RetryAfterError(err, 60*time.Second)
+		fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
		return true, err
	}
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
@@ -389,6 +500,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err := configstruct.Set(m, opt); err != nil {
 		return nil, err
 	}
+
+	if opt.UploadCutoff < defaultUploadCutoff {
+		return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
+	}
+
 	err := setupRegion(m)
 	if err != nil {
 		return nil, err
@@ -401,11 +517,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	f := &Fs{
-		name:  name,
-		root:  root,
-		opt:   *opt,
-		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
-		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		name:        name,
+		root:        root,
+		opt:         *opt,
+		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
+		downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
+		uploadsrv:   rest.NewClient(oAuthClient).SetRoot(uploadURL),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -643,9 +761,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
 	return
 }
 
+func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
+	opts := rest.Opts{
+		Method:        "POST",
+		Path:          "/stream/upload",
+		Body:          in,
+		ContentLength: &size,
+		ContentType:   "application/octet-stream",
+		Options:       options,
+		ExtraHeaders: map[string]string{
+			"x-filename":          url.QueryEscape(name),
+			"x-parent_id":         parent,
+			"override-name-exist": "true",
+			"upload-id":           uuid.New().String(),
+			"x-streammode":        "1",
+		},
+	}
+
+	var err error
+	var resp *http.Response
+	var uploadResponse *api.LargeUploadResponse
+	err = f.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("upload large error: %v", err)
+	}
+	if len(uploadResponse.Uploads) != 1 {
+		return nil, errors.New("upload: invalid response")
+	}
+	upload := uploadResponse.Uploads[0]
+	uploadInfo, err := upload.GetUploadFileInfo()
+	if err != nil {
+		return nil, fmt.Errorf("upload error: %w", err)
+	}
+
+	// Fill in the api.Item from the api.UploadFileInfo
+	var info api.Item
+	info.ID = upload.Attributes.RessourceID
+	info.Attributes.Name = upload.Attributes.FileName
+	// info.Attributes.Type = not used
+	info.Attributes.IsFolder = false
+	// info.Attributes.CreatedTime = not used
+	info.Attributes.ModifiedTime = uploadInfo.GetModTime()
+	// info.Attributes.UploadedTime = 0 not used
+	info.Attributes.StorageInfo.Size = uploadInfo.Size
+	info.Attributes.StorageInfo.FileCount = 0
+	info.Attributes.StorageInfo.FolderCount = 0
+
+	return &info, nil
+}
+
 func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
 	params := url.Values{}
-	params.Set("filename", name)
+	params.Set("filename", url.QueryEscape(name))
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
 	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
@@ -705,21 +875,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	size := src.Size()
-	remote := src.Remote()
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	switch err {
+	case nil:
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		size := src.Size()
+		remote := src.Remote()
 
-	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
-	if err != nil {
+		// Create the directory for the object if it doesn't exist
+		leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+		if err != nil {
+			return nil, err
+		}
+
+		// use normal upload API for small sizes (<10MiB)
+		if size < int64(f.opt.UploadCutoff) {
+			info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+			if err != nil {
+				return nil, err
+			}
+
+			return f.newObjectWithInfo(ctx, remote, info)
+		}
+
+		// large file API otherwise
+		info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return nil, err
+		}
+
+		return f.newObjectWithInfo(ctx, remote, info)
+	default:
 		return nil, err
 	}
-
-	// Upload the file
-	info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
-	if err != nil {
-		return nil, err
-	}
-	return f.newObjectWithInfo(ctx, remote, info)
 }
 
 // Mkdir creates the container if it doesn't exist
@@ -1159,7 +1348,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.downloadsrv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1183,11 +1372,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// Overwrite the old file
-	info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+	// use normal upload API for small sizes (<10MiB)
+	if size < int64(o.fs.opt.UploadCutoff) {
+		info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return err
+		}
+
+		return o.setMetaData(info)
+	}
+
+	// large file API otherwise
+	info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
 	if err != nil {
 		return err
 	}
+
 	return o.setMetaData(info)
 }
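
Put and Update in the hunks above both branch on the same cutoff: uploads below 10 MiB go through the existing multipart API, anything at or above it goes through the new streaming large-file endpoint. The dispatch reduces to a one-line size test (smallUpload/largeUpload below are placeholders for f.upload and f.uploadLargeFile):

package main

import "fmt"

const defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB, as in the diff

// upload dispatches on size, mirroring the cutoff test in Put and Update.
func upload(size int64, smallUpload, largeUpload func() error) error {
	if size < defaultUploadCutoff {
		return smallUpload() // multipart form upload
	}
	return largeUpload() // streaming /stream/upload endpoint
}

func main() {
	small := func() error { fmt.Println("small upload API"); return nil }
	large := func() error { fmt.Println("large upload API"); return nil }
	_ = upload(1024, small, large)
	_ = upload(64*1024*1024, small, large)
}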
@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestZoho:",
-		NilObject:  (*zoho.Object)(nil),
+		RemoteName:      "TestZoho:",
+		SkipInvalidUTF8: true,
+		NilObject:       (*zoho.Object)(nil),
 	})
 }
@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
 		continue
 	fi
 
-	commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
+	commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
 	if [ "$commit" == "" ]; then
-		commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
+		commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
 	fi
-	version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
+	version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
 	echo $backend $version
 	sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
 done
@@ -21,12 +21,12 @@ def find_backends():
 def output_docs(backend, out, cwd):
     """Output documentation for backend options to out"""
     out.flush()
-    subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
+    subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)
 
 def output_backend_tool_docs(backend, out, cwd):
     """Output documentation for backend tool to out"""
     out.flush()
-    subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
+    subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
 
 def alter_doc(backend):
     """Alter the documentation for backend"""
@@ -52,6 +52,7 @@ docs = [
     "hidrive.md",
     "http.md",
     "imagekit.md",
+    "iclouddrive.md",
     "internetarchive.md",
     "jottacloud.md",
     "koofr.md",
@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
 	exit 1
 fi
 VERSION="$1"
-ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
+ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
 
 cat > "/tmp/${VERSION}-release-notes" <<EOF
 This is the ${VERSION} release of rclone.
@@ -15,6 +15,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -207,15 +208,16 @@ type bisyncTest struct {
 	parent1 fs.Fs
 	parent2 fs.Fs
 	// global flags
-	argRemote1    string
-	argRemote2    string
-	noCompare     bool
-	noCleanup     bool
-	golden        bool
-	debug         bool
-	stopAt        int
-	TestFn        bisync.TestFunc
-	ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
+	argRemote1      string
+	argRemote2      string
+	noCompare       bool
+	noCleanup       bool
+	golden          bool
+	debug           bool
+	stopAt          int
+	TestFn          bisync.TestFunc
+	ignoreModtime   bool // ignore modtimes when comparing final listings, for backends without support
+	ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank
 }
 
 var color = bisync.Color
@@ -946,6 +948,10 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
 	if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") {
 		b.t.Skip("skipping test as remote does not support empty dirs")
 	}
+	ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank
+	if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) {
+		b.ignoreBlankHash = true
+	}
 	if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported {
 		if b.testCase != "nomodtime" {
 			b.t.Skip("skipping test as at least one remote does not support setting modtime")
@@ -1551,6 +1557,12 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 	if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) {
 		logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe)
 	}
+	if b.ignoreBlankHash {
+		logReplacements = append(logReplacements,
+			`^.*hash is missing.*$`, dropMe,
+			`^.*not equal on recheck.*$`, dropMe,
+		)
+	}
 	rep := logReplacements
 	if b.testCase == "dry_run" {
 		rep = append(rep, dryrunReplacements...)
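
The prefix check added to checkPreReqs uses slices.ContainsFunc from the standard library (Go 1.21+) rather than a hand-rolled loop. The same idiom in isolation, with invented test names:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// hasPrefixIn reports whether name starts with any of the prefixes,
// the same slices.ContainsFunc idiom used in checkPreReqs above.
func hasPrefixIn(name string, prefixes []string) bool {
	return slices.ContainsFunc(prefixes, func(prefix string) bool {
		return strings.HasPrefix(name, prefix)
	})
}

func main() {
	ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"}
	fmt.Println(hasPrefixIn("TestWebdavNextcloud-abc", ignoreHashBackends)) // true
	fmt.Println(hasPrefixIn("TestS3", ignoreHashBackends))                 // false
}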
@@ -20,6 +20,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 
 	"github.com/spf13/cobra"
@@ -193,7 +194,7 @@ var commandDefinition = &cobra.Command{
 		cmd.Run(false, true, command, func() error {
 			err := Bisync(ctx, fs1, fs2, &opt)
 			if err == ErrBisyncAborted {
-				os.Exit(2)
+				return fserrors.FatalError(err)
 			}
 			return err
 		})
@@ -10,6 +10,7 @@ import (
 	"io"
 	"os"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -21,7 +22,6 @@ import (
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"golang.org/x/exp/slices"
 )
 
 // ListingHeader defines first line of a listing
@@ -23,7 +23,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )
 
-// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
+// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code
 var ErrBisyncAborted = errors.New("bisync aborted")
 
 // bisyncRun keeps bisync runtime state
cmd/cmd.go (56 changes)

@@ -50,7 +50,6 @@ var (
 	version bool
 	// Errors
 	errorCommandNotFound    = errors.New("command not found")
-	errorUncategorized      = errors.New("uncategorized error")
 	errorNotEnoughArguments = errors.New("not enough arguments")
 	errorTooManyArguments   = errors.New("too many arguments")
 )
@@ -84,12 +83,13 @@ func ShowVersion() {
 // It returns a string with the file name if points to a file
 // otherwise "".
 func NewFsFile(remote string) (fs.Fs, string) {
+	ctx := context.Background()
 	_, fsPath, err := fspath.SplitFs(remote)
 	if err != nil {
-		err = fs.CountError(err)
+		err = fs.CountError(ctx, err)
 		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
-	f, err := cache.Get(context.Background(), remote)
+	f, err := cache.Get(ctx, remote)
 	switch err {
 	case fs.ErrorIsFile:
 		cache.Pin(f) // pin indefinitely since it was on the CLI
@@ -98,7 +98,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
 		cache.Pin(f) // pin indefinitely since it was on the CLI
 		return f, ""
 	default:
-		err = fs.CountError(err)
+		err = fs.CountError(ctx, err)
 		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
 	return nil, ""
@@ -109,18 +109,19 @@ func NewFsFile(remote string) (fs.Fs, string) {
 // This works the same as NewFsFile however it adds filters to the Fs
 // to limit it to a single file if the remote pointed to a file.
 func newFsFileAddFilter(remote string) (fs.Fs, string) {
-	fi := filter.GetConfig(context.Background())
+	ctx := context.Background()
+	fi := filter.GetConfig(ctx)
 	f, fileName := NewFsFile(remote)
 	if fileName != "" {
 		if !fi.InActive() {
 			err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
-			err = fs.CountError(err)
+			err = fs.CountError(ctx, err)
 			fs.Fatal(nil, err.Error())
 		}
 		// Limit transfers to this file
 		err := fi.AddFile(fileName)
 		if err != nil {
-			err = fs.CountError(err)
+			err = fs.CountError(ctx, err)
 			fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err)
 		}
 	}
@@ -140,9 +141,10 @@ func NewFsSrc(args []string) fs.Fs {
 //
 // This must point to a directory
 func newFsDir(remote string) fs.Fs {
-	f, err := cache.Get(context.Background(), remote)
+	ctx := context.Background()
+	f, err := cache.Get(ctx, remote)
 	if err != nil {
-		err = fs.CountError(err)
+		err = fs.CountError(ctx, err)
 		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
 	cache.Pin(f) // pin indefinitely since it was on the CLI
@@ -176,6 +178,7 @@ func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs)
 // NewFsSrcDstFiles creates a new src and dst fs from the arguments
 // If src is a file then srcFileName and dstFileName will be non-empty
 func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
+	ctx := context.Background()
 	fsrc, srcFileName = newFsFileAddFilter(args[0])
 	// If copying a file...
 	dstRemote := args[1]
@@ -194,14 +197,14 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 			fs.Fatalf(nil, "%q is a directory", args[1])
 		}
 	}
-	fdst, err := cache.Get(context.Background(), dstRemote)
+	fdst, err := cache.Get(ctx, dstRemote)
 	switch err {
 	case fs.ErrorIsFile:
-		_ = fs.CountError(err)
+		_ = fs.CountError(ctx, err)
 		fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file")
 	case nil:
 	default:
-		_ = fs.CountError(err)
+		_ = fs.CountError(ctx, err)
 		fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err)
 	}
 	cache.Pin(fdst) // pin indefinitely since it was on the CLI
@@ -235,7 +238,8 @@ func ShowStats() bool {
 
 // Run the function with stats and retries if required
 func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
-	ci := fs.GetConfig(context.Background())
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	var cmdErr error
 	stopStats := func() {}
 	if !showStats && ShowStats() {
@@ -249,7 +253,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 	SigInfoHandler()
 	for try := 1; try <= ci.Retries; try++ {
 		cmdErr = f()
-		cmdErr = fs.CountError(cmdErr)
+		cmdErr = fs.CountError(ctx, cmdErr)
 		lastErr := accounting.GlobalStats().GetLastError()
 		if cmdErr == nil {
 			cmdErr = lastErr
@@ -437,19 +441,19 @@ func initConfig() {
 		fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
 		f, err := os.Create(*cpuProfile)
 		if err != nil {
-			err = fs.CountError(err)
+			err = fs.CountError(ctx, err)
 			fs.Fatal(nil, fmt.Sprint(err))
 		}
 		err = pprof.StartCPUProfile(f)
 		if err != nil {
-			err = fs.CountError(err)
+			err = fs.CountError(ctx, err)
 			fs.Fatal(nil, fmt.Sprint(err))
 		}
 		atexit.Register(func() {
 			pprof.StopCPUProfile()
 			err := f.Close()
 			if err != nil {
-				err = fs.CountError(err)
+				err = fs.CountError(ctx, err)
 				fs.Fatal(nil, fmt.Sprint(err))
 			}
 		})
@@ -461,17 +465,17 @@ func initConfig() {
 			fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
 			f, err := os.Create(*memProfile)
 			if err != nil {
-				err = fs.CountError(err)
+				err = fs.CountError(ctx, err)
 				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			err = pprof.WriteHeapProfile(f)
 			if err != nil {
-				err = fs.CountError(err)
+				err = fs.CountError(ctx, err)
 				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			err = f.Close()
 			if err != nil {
-				err = fs.CountError(err)
+				err = fs.CountError(ctx, err)
 				fs.Fatal(nil, fmt.Sprint(err))
 			}
 		})
@@ -479,7 +483,8 @@ func initConfig() {
 }
 
 func resolveExitCode(err error) {
-	ci := fs.GetConfig(context.Background())
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	atexit.Run()
 	if err == nil {
 		if ci.ErrorOnNoTransfer {
@@ -495,8 +500,6 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.DirNotFound)
 	case errors.Is(err, fs.ErrorObjectNotFound):
 		os.Exit(exitcode.FileNotFound)
-	case errors.Is(err, errorUncategorized):
-		os.Exit(exitcode.UncategorizedError)
 	case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
 		os.Exit(exitcode.TransferExceeded)
 	case errors.Is(err, fssync.ErrorMaxDurationReached):
@@ -507,8 +510,10 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.NoRetryError)
 	case fserrors.IsFatalError(err):
 		os.Exit(exitcode.FatalError)
-	default:
+	case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments):
 		os.Exit(exitcode.UsageError)
+	default:
+		os.Exit(exitcode.UncategorizedError)
 	}
 }
@@ -536,6 +541,7 @@ func Main() {
 		if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
 			Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
 		}
-		fs.Fatalf(nil, "Fatal error: %v", err)
+		fs.Logf(nil, "Fatal error: %v", err)
+		os.Exit(exitcode.UsageError)
 	}
 }
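
Most of the cmd/cmd.go churn above is mechanical: fs.CountError gained a context parameter, so errors are counted against the stats attached to that context instead of a process-wide global. A toy version of a context-scoped error counter shows why the new signature needs the ctx (statsKey and countError are illustrative, not rclone's accounting API):

package main

import (
	"context"
	"fmt"
	"sync/atomic"
)

type statsKey struct{}

// stats is a per-context error counter, standing in for the accounting
// stats that fs.CountError(ctx, err) updates.
type stats struct{ errors atomic.Int64 }

func withStats(ctx context.Context) context.Context {
	return context.WithValue(ctx, statsKey{}, &stats{})
}

// countError mirrors the new fs.CountError shape: a no-op on nil errors,
// otherwise it increments the counter found in ctx and returns the error.
func countError(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	if s, ok := ctx.Value(statsKey{}).(*stats); ok {
		s.errors.Add(1)
	}
	return err
}

func main() {
	ctx := withStats(context.Background())
	_ = countError(ctx, fmt.Errorf("boom"))
	_ = countError(ctx, nil)
	s := ctx.Value(statsKey{}).(*stats)
	fmt.Println("errors counted:", s.errors.Load()) // errors counted: 1
}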
@@ -373,6 +373,9 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {
 
 	m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
 	if err != nil {
+		if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
+			return nil, fmt.Errorf("mounting is not supported when running from snap")
+		}
 		return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
 	}
 	m.MountedOn = time.Now()
@@ -190,16 +190,17 @@ func (s *server) ModelNumber() string {
 
 // Renders the root device descriptor.
 func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	tmpl, err := data.GetTemplate()
 	if err != nil {
-		serveError(s, w, "Failed to load root descriptor template", err)
+		serveError(ctx, s, w, "Failed to load root descriptor template", err)
 		return
 	}
 
 	buffer := new(bytes.Buffer)
 	err = tmpl.Execute(buffer, s)
 	if err != nil {
-		serveError(s, w, "Failed to render root descriptor XML", err)
+		serveError(ctx, s, w, "Failed to render root descriptor XML", err)
 		return
 	}
 
@@ -215,15 +216,16 @@ func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {
 
 // Handle a service control HTTP request.
 func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	soapActionString := r.Header.Get("SOAPACTION")
 	soapAction, err := upnp.ParseActionHTTPHeader(soapActionString)
 	if err != nil {
-		serveError(s, w, "Could not parse SOAPACTION header", err)
+		serveError(ctx, s, w, "Could not parse SOAPACTION header", err)
 		return
 	}
 	var env soap.Envelope
 	if err := xml.NewDecoder(r.Body).Decode(&env); err != nil {
-		serveError(s, w, "Could not parse SOAP request body", err)
+		serveError(ctx, s, w, "Could not parse SOAP request body", err)
 		return
 	}
 
@@ -257,6 +259,7 @@ func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte,
 
 // Serves actual resources (media files).
 func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	remotePath := r.URL.Path
 	node, err := s.vfs.Stat(r.URL.Path)
 	if err != nil {
@@ -277,7 +280,7 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
 	file := node.(*vfs.File)
 	in, err := file.Open(os.O_RDONLY)
 	if err != nil {
-		serveError(node, w, "Could not open resource", err)
+		serveError(ctx, node, w, "Could not open resource", err)
 		return
 	}
 	defer fs.CheckClose(in, &err)

@@ -1,6 +1,7 @@
 package dlna
 
 import (
+	"context"
 	"crypto/md5"
 	"encoding/xml"
 	"fmt"
@@ -142,9 +143,10 @@ func logging(next http.Handler) http.Handler {
 // Error recovery and general request logging are left to logging().
 func traceLogging(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
 		dump, err := httputil.DumpRequest(r, true)
 		if err != nil {
-			serveError(nil, w, "error dumping request", err)
+			serveError(ctx, nil, w, "error dumping request", err)
 			return
 		}
 		fs.Debugf(nil, "%s", dump)
@@ -182,8 +184,8 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
 }
 
 // serveError returns an http.StatusInternalServerError and logs the error
-func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
-	err = fs.CountError(err)
+func serveError(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) {
+	err = fs.CountError(ctx, err)
 	fs.Errorf(what, "%s: %v", text, err)
 	http.Error(w, text+".", http.StatusInternalServerError)
 }
@@ -186,6 +186,7 @@ func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
 
 // serveDir serves a directory index at dirRemote
 func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
+	ctx := r.Context()
 	VFS, err := s.getVFS(r.Context())
 	if err != nil {
 		http.Error(w, "Root directory not found", http.StatusNotFound)
@@ -198,7 +199,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 		http.Error(w, "Directory not found", http.StatusNotFound)
 		return
 	} else if err != nil {
-		serve.Error(dirRemote, w, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
 		return
 	}
 	if !node.IsDir() {
@@ -208,7 +209,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 	dir := node.(*vfs.Dir)
 	dirEntries, err := dir.ReadDirAll()
 	if err != nil {
-		serve.Error(dirRemote, w, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
 		return
 	}
 
@@ -234,6 +235,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 
 // serveFile serves a file object at remote
 func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
+	ctx := r.Context()
 	VFS, err := s.getVFS(r.Context())
 	if err != nil {
 		http.Error(w, "File not found", http.StatusNotFound)
@@ -247,7 +249,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
 		http.Error(w, "File not found", http.StatusNotFound)
 		return
 	} else if err != nil {
-		serve.Error(remote, w, "Failed to find file", err)
+		serve.Error(ctx, remote, w, "Failed to find file", err)
 		return
 	}
 	if !node.IsFile() {
@@ -287,7 +289,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
 	// open the object
 	in, err := file.Open(os.O_RDONLY)
 	if err != nil {
-		serve.Error(remote, w, "Failed to open file", err)
+		serve.Error(ctx, remote, w, "Failed to open file", err)
 		return
 	}
 	defer func() {
@@ -95,61 +95,44 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
 		if err != nil {
 			return fmt.Errorf("send output failed: %w", err)
 		}
-	case "md5sum", "sha1sum":
-		ht := hash.MD5
-		if binary == "sha1sum" {
-			ht = hash.SHA1
-		}
-		if !c.vfs.Fs().Hashes().Contains(ht) {
-			return fmt.Errorf("%v hash not supported", ht)
-		}
-		var hashSum string
-		if args == "" {
-			// empty hash for no input
-			if ht == hash.MD5 {
-				hashSum = "d41d8cd98f00b204e9800998ecf8427e"
-			} else {
-				hashSum = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
-			}
-			args = "-"
-		} else {
-			node, err := c.vfs.Stat(args)
-			if err != nil {
-				return fmt.Errorf("hash failed finding file %q: %w", args, err)
-			}
-			if node.IsDir() {
-				return errors.New("can't hash directory")
-			}
-			o, ok := node.DirEntry().(fs.ObjectInfo)
-			if !ok {
-				fs.Debugf(args, "File uploading - reading hash from VFS cache")
-				in, err := node.Open(os.O_RDONLY)
-				if err != nil {
-					return fmt.Errorf("hash vfs open failed: %w", err)
-				}
-				defer func() {
-					_ = in.Close()
-				}()
-				h, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
-				if err != nil {
-					return fmt.Errorf("hash vfs create multi-hasher failed: %w", err)
-				}
-				_, err = io.Copy(h, in)
-				if err != nil {
-					return fmt.Errorf("hash vfs copy failed: %w", err)
-				}
-				hashSum = h.Sums()[ht]
-			} else {
-				hashSum, err = o.Hash(ctx, ht)
-				if err != nil {
-					return fmt.Errorf("hash failed: %w", err)
-				}
-			}
-		}
-		_, err = fmt.Fprintf(out, "%s %s\n", hashSum, args)
-		if err != nil {
-			return fmt.Errorf("send output failed: %w", err)
-		}
+	case "md5sum":
+		return c.handleHashsumCommand(ctx, out, hash.MD5, args)
+	case "sha1sum":
+		return c.handleHashsumCommand(ctx, out, hash.SHA1, args)
+	case "crc32":
+		return c.handleHashsumCommand(ctx, out, hash.CRC32, args)
+	case "sha256sum":
+		return c.handleHashsumCommand(ctx, out, hash.SHA256, args)
+	case "b3sum":
+		return c.handleHashsumCommand(ctx, out, hash.BLAKE3, args)
+	case "xxh128sum":
+		return c.handleHashsumCommand(ctx, out, hash.XXH128, args)
+	case "xxhsum":
+		argv := strings.SplitN(args, " ", 2)
+		if len(argv) == 0 || argv[0] != "-H2" {
+			return fmt.Errorf("%q not implemented", command)
+		}
+		if len(argv) > 1 {
+			args = argv[1]
+		} else {
+			args = ""
+		}
+		return c.handleHashsumCommand(ctx, out, hash.XXH128, args)
+	case "rclone":
+		argv := strings.SplitN(args, " ", 3)
+		if len(argv) > 1 && argv[0] == "hashsum" {
+			var ht hash.Type
+			if err := ht.Set(argv[1]); err != nil {
+				return err
+			}
+			if len(argv) > 2 {
+				args = argv[2]
+			} else {
+				args = ""
+			}
+			return c.handleHashsumCommand(ctx, out, ht, args)
+		}
+		return fmt.Errorf("%q not implemented", command)
 	case "echo":
 		// Special cases for legacy rclone command detection.
 		// Before rclone v1.49.0 the sftp backend used "echo 'abc' | md5sum" when
@@ -189,6 +172,74 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
 	return nil
 }
 
+// handleHashsumCommand is a helper to execCommand for common functionality of hashsum related commands
+func (c *conn) handleHashsumCommand(ctx context.Context, out io.Writer, ht hash.Type, args string) (err error) {
+	if !c.vfs.Fs().Hashes().Contains(ht) {
+		return fmt.Errorf("%v hash not supported", ht)
+	}
+	var hashSum string
+	if args == "" {
+		// empty hash for no input
+		switch ht {
+		case hash.MD5:
+			hashSum = "d41d8cd98f00b204e9800998ecf8427e"
+		case hash.SHA1:
+			hashSum = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
+		case hash.CRC32:
+			hashSum = "00000000"
+		case hash.SHA256:
+			hashSum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+		case hash.BLAKE3:
+			hashSum = "af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262"
+		case hash.XXH3:
+			hashSum = "2d06800538d394c2"
+		case hash.XXH128:
+			hashSum = "99aa06d3014798d86001c324468d497f"
+		default:
+			return fmt.Errorf("%v hash not implemented", ht)
+		}
+		args = "-"
+	} else {
+		node, err := c.vfs.Stat(args)
+		if err != nil {
+			return fmt.Errorf("hash failed finding file %q: %w", args, err)
+		}
+		if node.IsDir() {
+			return errors.New("can't hash directory")
+		}
+		o, ok := node.DirEntry().(fs.ObjectInfo)
+		if !ok {
+			fs.Debugf(args, "File uploading - reading hash from VFS cache")
+			in, err := node.Open(os.O_RDONLY)
+			if err != nil {
+				return fmt.Errorf("hash vfs open failed: %w", err)
+			}
+			defer func() {
+				_ = in.Close()
+			}()
+			h, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
+			if err != nil {
+				return fmt.Errorf("hash vfs create multi-hasher failed: %w", err)
+			}
+			_, err = io.Copy(h, in)
+			if err != nil {
+				return fmt.Errorf("hash vfs copy failed: %w", err)
+			}
+			hashSum = h.Sums()[ht]
+		} else {
+			hashSum, err = o.Hash(ctx, ht)
+			if err != nil {
+				return fmt.Errorf("hash failed: %w", err)
+			}
+		}
+	}
+	_, err = fmt.Fprintf(out, "%s %s\n", hashSum, args)
+	if err != nil {
+		return fmt.Errorf("send output failed: %w", err)
+	}
+	return nil
+}
+
 // handle a new incoming channel request
 func (c *conn) handleChannel(newChannel ssh.NewChannel) {
 	fs.Debugf(c.what, "Incoming channel: %s\n", newChannel.ChannelType())
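
handleHashsumCommand above answers the empty-input case from a table of well-known digests rather than hashing zero bytes through the VFS. The MD5, SHA-1 and SHA-256 constants can be reproduced from the standard library (the CRC32, BLAKE3 and XXH values would need their own packages, so they are not recomputed here):

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// Recompute the empty-input digests used in handleHashsumCommand above.
func main() {
	m := md5.Sum(nil)
	s1 := sha1.Sum(nil)
	s256 := sha256.Sum256(nil)
	fmt.Println(hex.EncodeToString(m[:]))    // d41d8cd98f00b204e9800998ecf8427e
	fmt.Println(hex.EncodeToString(s1[:]))   // da39a3ee5e6b4b0d3255bfef95601890afd80709
	fmt.Println(hex.EncodeToString(s256[:])) // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}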
@@ -143,8 +143,13 @@ func (s *server) serve() (err error) {
 		authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
 		authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
 		// If user set the flag away from the default then report an error
-		if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
-			return err
+		if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
+			if err != nil {
+				return err
+			}
+			if len(authorizedKeysMap) == 0 {
+				return fmt.Errorf("failed to parse authorized keys")
+			}
 		}
 		fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
 	}
@@ -349,11 +354,10 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
 	authorizedKeysMap = make(map[string]struct{})
 	for len(authorizedKeysBytes) > 0 {
 		pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
-		if err != nil {
-			return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
+		if err == nil {
+			authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
+			authorizedKeysBytes = bytes.TrimSpace(rest)
 		}
-		authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
-		authorizedKeysBytes = bytes.TrimSpace(rest)
 	}
 	return authorizedKeysMap, nil
 }
@@ -349,6 +349,7 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
// serveDir serves a directory index at dirRemote
// This is similar to serveDir in serve http.
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
	ctx := r.Context()
	VFS, err := w.getVFS(r.Context())
	if err != nil {
		http.Error(rw, "Root directory not found", http.StatusNotFound)

@@ -361,7 +362,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
		http.Error(rw, "Directory not found", http.StatusNotFound)
		return
	} else if err != nil {
		serve.Error(dirRemote, rw, "Failed to list directory", err)
		serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
		return
	}
	if !node.IsDir() {

@@ -372,7 +373,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
	dirEntries, err := dir.ReadDirAll()

	if err != nil {
		serve.Error(dirRemote, rw, "Failed to list directory", err)
		serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
		return
	}

@@ -174,7 +174,7 @@ func TestCmdTest(t *testing.T) {
	// Test error and error output
	out, err = rclone("version", "--provoke-an-error")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "exit status 1")
		assert.Contains(t, err.Error(), "exit status 2")
		assert.Contains(t, out, "Error: unknown flag")
	}

@@ -15,7 +15,7 @@ import (
	"github.com/stretchr/testify/require"
)

// TestCmdTest demonstrates and verifies the test functions for end-to-end testing of rclone
// TestEnvironmentVariables demonstrates and verifies the test functions for end-to-end testing of rclone
func TestEnvironmentVariables(t *testing.T) {

	createTestEnvironment(t)

@@ -66,12 +66,13 @@ so it is easy to tweak stuff.
└── static - static content for the website
    ├── css
    │   ├── bootstrap.css
    │   ├── custom.css - custom css goes here
    │   └── font-awesome.css
    │   └── custom.css - custom css goes here
    ├── fontawesome
    │   ├── css
    │   └── webfonts
    ├── img - images used
    ├── js
    │   ├── bootstrap.js
    │   ├── custom.js - custom javascript goes here
    │   └── jquery.js
    └── webfonts
```

@@ -132,6 +132,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
{{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}

@@ -159,6 +160,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}

@@ -176,6 +178,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}

@@ -889,3 +889,25 @@ put them back in again.` >}}
* Mathieu Moreau <mrx23dot@users.noreply.github.com>
* fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
* Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
* Divyam <47589864+divyam234@users.noreply.github.com>
* ttionya <ttionya@users.noreply.github.com>
* quiescens <quiescens@gmail.com>
* rishi.sridhar <rishi.sridhar@zohocorp.com>
* Lawrence Murray <lawrence@indii.org>
* Leandro Piccilli <leandro.piccilli@thalesgroup.com>
* Benjamin Legrand <benjamin.legrand@seagate.com>
* Noam Ross <noam.ross@gmail.com>
* lostb1t <coding-mosses0z@icloud.com>
* Matthias Gatto <matthias.gatto@outscale.com>
* André Tran <andre.tran@outscale.com>
* Simon Bos <simon@simonbos.be>
* Alexandre Hamez <199517+ahamez@users.noreply.github.com>
* Randy Bush <randy@psg.com>
* Diego Monti <diegmonti@users.noreply.github.com>
* tgfisher <tgfisher@stanford.edu>
* Moises Lima <mozlima@gmail.com>
* Dimitar Ivanov <mimiteto@gmail.com>
* shenpengfeng <xinhangzhou@icloud.com>
* Dimitrios Slamaris <dim0x69@users.noreply.github.com>
* vintagefuture <39503528+vintagefuture@users.noreply.github.com>
* David Seifert <soap@gentoo.org>

@@ -180,6 +180,13 @@ If the resource has multiple user-assigned identities you will need to
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
section](#use_msi).

If you are operating in disconnected clouds, or private clouds such as
Azure Stack, you may want to set `disable_instance_discovery = true`.
This determines whether rclone requests Microsoft Entra instance
metadata from `https://login.microsoft.com/` before authenticating.
Setting this to `true` will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
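As a sketch, a remote for such an environment might look like this (the remote name and account are placeholders):

```
[azstack]
type = azureblob
account = myaccount
env_auth = true
disable_instance_discovery = true
```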

##### Env Auth: 3. Azure CLI credentials (as used by the az tool)

Credentials created with the `az` tool can be picked up using `env_auth`.

@@ -290,6 +297,16 @@ be explicitly specified using exactly one of the `msi_object_id`,
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
set, this is equivalent to using `env_auth`.

#### Azure CLI tool `az` {#use_az}

Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.

Setting this can be useful if you wish to use the `az` CLI on a host with
a System Managed Identity that you do not want to use.

Don't set `env_auth` at the same time.
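For example, a minimal remote that authenticates only through the `az` CLI might look like this (names are illustrative):

```
[azblob]
type = azureblob
account = myaccount
use_az = true
```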

#### Anonymous {#anonymous}

If you want to access resources with public anonymous access then set

@@ -968,12 +968,15 @@ that while concurrent bisync runs are allowed, _be very cautious_
that there is no overlap in the trees being synched between concurrent runs,
lest there be replicated files, deleted files and general mayhem.

### Return codes
### Exit codes

`rclone bisync` returns the following codes to the calling program:
- `0` on a successful run,
- `1` for a non-critical failing run (a rerun may be successful),
- `2` for a critically aborted run (requires a `--resync` to recover).
- `2` on syntax or usage error,
- `7` for a critically aborted run (requires a `--resync` to recover).

See also the section about [exit codes](/docs/#exit-code) in main docs.
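For scripting, a minimal sketch of acting on the new codes (remote paths are placeholders):

```
rclone bisync remote1:path1 remote2:path2
case $? in
  0) echo "success" ;;
  1) echo "non-critical failure - a rerun may succeed" ;;
  2) echo "syntax or usage error" ;;
  7) echo "critical abort - recover with --resync" ;;
esac
```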

### Graceful Shutdown

@@ -64,7 +64,7 @@ If not sure try Y. If Y failed, try N.
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=XXXXXXXXXXXXXXXXXXXXXX
Log in and authorize rclone for access
Waiting for code...
Got code

@@ -510,7 +510,7 @@ rclone [flags]
      --metadata-include-from stringArray   Read metadata include patterns from file (use - to read from stdin)
      --metadata-mapper SpaceSepList        Program to run to transforming metadata before upload
      --metadata-set stringArray            Add metadata key=value when uploading
      --metrics-addr stringArray            IPaddress:Port or :Port to bind metrics server to
      --metrics-addr stringArray            IPaddress:Port or :Port to bind metrics server to (default [""])
      --metrics-allow-origin string         Origin which cross-domain request (CORS) can be executed from
      --metrics-baseurl string              Prefix for URLs - leave blank for root
      --metrics-cert string                 TLS PEM key (concatenation of certificate and CA certificate)

@@ -616,18 +616,21 @@ rclone [flags]
      --pcloud-token string                 OAuth Access Token as a JSON blob
      --pcloud-token-url string             Token server url
      --pcloud-username string              Your pcloud username
      --pikpak-auth-url string              Auth server URL
      --pikpak-chunk-size SizeSuffix        Chunk size for multipart uploads (default 5Mi)
      --pikpak-client-id string             OAuth Client Id
      --pikpak-client-secret string         OAuth Client Secret
      --pikpak-description string           Description of the remote
      --pikpak-device-id string             Device ID used for authorization
      --pikpak-encoding Encoding            The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
      --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
      --pikpak-pass string                  Pikpak password (obscured)
      --pikpak-root-folder-id string        ID of the root folder
      --pikpak-token string                 OAuth Access Token as a JSON blob
      --pikpak-token-url string             Token server url
      --pikpak-trashed-only                 Only show files that are in the trash
      --pikpak-upload-concurrency int       Concurrency for multipart uploads (default 5)
      --pikpak-use-trash                    Send files to the trash instead of deleting permanently (default true)
      --pikpak-user string                  Pikpak username
      --pikpak-user-agent string            HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
      --pixeldrain-api-key string           API key for your pixeldrain account
      --pixeldrain-api-url string           The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
      --pixeldrain-description string       Description of the remote

@@ -680,7 +683,7 @@ rclone [flags]
      --quatrix-skip-project-folders        Skip project folders in operations
  -q, --quiet                               Print as little stuff as possible
      --rc                                  Enable the remote control server
      --rc-addr stringArray                 IPaddress:Port or :Port to bind server to (default localhost:5572)
      --rc-addr stringArray                 IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
      --rc-allow-origin string              Origin which cross-domain request (CORS) can be executed from
      --rc-baseurl string                   Prefix for URLs - leave blank for root
      --rc-cert string                      TLS PEM key (concatenation of certificate and CA certificate)

@@ -929,7 +932,7 @@ rclone [flags]
      --use-json-log                        Use json log format
      --use-mmap                            Use mmap allocator (see docs)
      --use-server-modtime                  Use server modified time instead of object metadata
      --user-agent string                   Set the user-agent to a specified string (default "rclone/v1.68.2")
      --user-agent string                   Set the user-agent to a specified string (default "rclone/v1.68.0")
  -v, --verbose count                       Print lots more stuff (repeat for more)
  -V, --version                             Print the version number
      --webdav-bearer-token string          Bearer token instead of user/pass (e.g. a Macaroon)

@@ -54,9 +54,7 @@ When running in background mode the user will have to stop the mount manually:

    # Linux
    fusermount -u /path/to/local/mount
    #... or on some systems
    fusermount3 -u /path/to/local/mount
    # OS X or Linux when using nfsmount
    # OS X
    umount /path/to/local/mount

The umount operation can fail, for example when the mountpoint is busy.

@@ -400,9 +398,9 @@ Note that systemd runs mount units without any environment variables including
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
and you should provide `--config` and `--cache-dir` explicitly as absolute
paths via rclone arguments.
Since mounting requires the `fusermount` or `fusermount3` program,
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
Since mounting requires the `fusermount` program, rclone will use the fallback
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
is present on this PATH.

## Rclone as Unix mount helper

@@ -55,9 +55,7 @@ When running in background mode the user will have to stop the mount manually:

    # Linux
    fusermount -u /path/to/local/mount
    #... or on some systems
    fusermount3 -u /path/to/local/mount
    # OS X or Linux when using nfsmount
    # OS X
    umount /path/to/local/mount

The umount operation can fail, for example when the mountpoint is busy.

@@ -401,9 +399,9 @@ Note that systemd runs mount units without any environment variables including
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
and you should provide `--config` and `--cache-dir` explicitly as absolute
paths via rclone arguments.
Since mounting requires the `fusermount` or `fusermount3` program,
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
Since mounting requires the `fusermount` program, rclone will use the fallback
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
is present on this PATH.

## Rclone as Unix mount helper

@@ -166,7 +166,7 @@ Flags to control the Remote Control API

```
      --rc                                 Enable the remote control server
      --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default localhost:5572)
      --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
      --rc-allow-origin string             Origin which cross-domain request (CORS) can be executed from
      --rc-baseurl string                  Prefix for URLs - leave blank for root
      --rc-cert string                     TLS PEM key (concatenation of certificate and CA certificate)

@@ -537,6 +537,13 @@ sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docke
```
though this is rarely needed.

If the plugin fails to work properly, and only as a last resort after you have tried diagnosing it with the methods above, you can try clearing the plugin's state. **Note that all existing rclone docker volumes will probably have to be recreated.** This might be needed because a reinstall doesn't clean up existing state files (to allow for easy restoration, as stated above).
```
docker plugin disable rclone # disable the plugin to ensure no interference
sudo rm /var/lib/docker-plugins/rclone/cache/docker-plugin.state # remove the plugin state
docker plugin enable rclone # re-enable the plugin afterward
```

## Caveats

Finally I'd like to mention a _caveat with updating volume settings_.

@@ -53,6 +53,7 @@ See the following for detailed instructions for
* [Hetzner Storage Box](/sftp/#hetzner-storage-box)
* [HiDrive](/hidrive/)
* [HTTP](/http/)
* [iCloud Drive](/iclouddrive/)
* [Internet Archive](/internetarchive/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)

@@ -2868,9 +2869,9 @@ messages may not be valid after the retry. If rclone has done a retry
it will log a high priority message if the retry was successful.

### List of exit codes ###
* `0` - success
* `1` - Syntax or usage error
* `2` - Error not otherwise categorised
* `0` - Success
* `1` - Error not otherwise categorised
* `2` - Syntax or usage error
* `3` - Directory not found
* `4` - File not found
* `5` - Temporary error (one that more retries might fix) (Retry errors)

@@ -536,6 +536,7 @@ represent the currently available conversions.
| html | text/html | An HTML Document |
| jpg | image/jpeg | A JPEG Image File |
| json | application/vnd.google-apps.script+json | JSON Text Format for Google Apps scripts |
| md | text/markdown | Markdown Text Format |
| odp | application/vnd.oasis.opendocument.presentation | Openoffice Presentation |
| ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
| ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |

@@ -4,7 +4,7 @@ description: "Rclone docs for Files.com"
versionIntroduced: "v1.68"
---

# {{< icon "fa fa-file-alt" >}} Files.com
# {{< icon "fa fa-brands fa-files-pinwheel" >}} Files.com

[Files.com](https://www.files.com/) is a cloud storage service that provides a
secure and easy way to store and share files.

@@ -115,7 +115,7 @@ Flags for general networking and HTTP stuff.
      --tpslimit float                     Limit HTTP transactions per second to this
      --tpslimit-burst int                 Max burst of transactions for --tpslimit (default 1)
      --use-cookies                        Enable session cookiejar
      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.68.2")
      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.68.0")
```

@@ -264,7 +264,7 @@ Flags to control the Remote Control API.

```
      --rc                                 Enable the remote control server
      --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default localhost:5572)
      --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
      --rc-allow-origin string             Origin which cross-domain request (CORS) can be executed from
      --rc-baseurl string                  Prefix for URLs - leave blank for root
      --rc-cert string                     TLS PEM key (concatenation of certificate and CA certificate)

@@ -300,7 +300,7 @@ Flags to control the Remote Control API.
Flags to control the Metrics HTTP endpoint.

```
      --metrics-addr stringArray           IPaddress:Port or :Port to bind metrics server to
      --metrics-addr stringArray           IPaddress:Port or :Port to bind metrics server to (default [""])
      --metrics-allow-origin string        Origin which cross-domain request (CORS) can be executed from
      --metrics-baseurl string             Prefix for URLs - leave blank for root
      --metrics-cert string                TLS PEM key (concatenation of certificate and CA certificate)

@@ -794,18 +794,21 @@ Backend-only flags (these can be set in the config file also).
      --pcloud-token string                 OAuth Access Token as a JSON blob
      --pcloud-token-url string             Token server url
      --pcloud-username string              Your pcloud username
      --pikpak-auth-url string              Auth server URL
      --pikpak-chunk-size SizeSuffix        Chunk size for multipart uploads (default 5Mi)
      --pikpak-client-id string             OAuth Client Id
      --pikpak-client-secret string         OAuth Client Secret
      --pikpak-description string           Description of the remote
      --pikpak-device-id string             Device ID used for authorization
      --pikpak-encoding Encoding            The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
      --pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
      --pikpak-pass string                  Pikpak password (obscured)
      --pikpak-root-folder-id string        ID of the root folder
      --pikpak-token string                 OAuth Access Token as a JSON blob
      --pikpak-token-url string             Token server url
      --pikpak-trashed-only                 Only show files that are in the trash
      --pikpak-upload-concurrency int       Concurrency for multipart uploads (default 5)
      --pikpak-use-trash                    Send files to the trash instead of deleting permanently (default true)
      --pikpak-user string                  Pikpak username
      --pikpak-user-agent string            HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
      --pixeldrain-api-key string           API key for your pixeldrain account
      --pixeldrain-api-url string           The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
      --pixeldrain-description string       Description of the remote

@@ -201,6 +201,55 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.

### Service Account Authentication with Access Tokens

Another option for service account authentication is to use access tokens via *gcloud impersonate-service-account*. Access tokens improve security by avoiding the use of the JSON key file, which can be leaked. They also bypass the OAuth login flow, which is simpler on remote VMs that lack a web browser.

If you already have a working service account, skip to step 3.

#### 1. Create a service account

    gcloud iam service-accounts create gcs-read-only

You can re-use an existing service account as well (like the one created above)

#### 2. Attach a Viewer (read-only) or User (read-write) role to the service account

    $ PROJECT_ID=my-project
    $ gcloud --verbose iam service-accounts add-iam-policy-binding \
        gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com \
        --member=serviceAccount:gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com \
        --role=roles/storage.objectViewer

Use the Google Cloud console to identify a limited role. Some relevant pre-defined roles:

* *roles/storage.objectUser* -- read-write access but no admin privileges
* *roles/storage.objectViewer* -- read-only access to objects
* *roles/storage.admin* -- create buckets & administrative roles

#### 3. Get a temporary access key for the service account

    $ gcloud auth application-default print-access-token \
        --impersonate-service-account \
        dev-gcloud-go@${PROJECT_ID}.iam.gserviceaccount.com

    ya29.c.c0ASRK0GbAFEewXD [truncated]

#### 4. Update `access_token` setting

Hit `CTRL-C` when you see *waiting for code*. This will save the config without doing the OAuth flow.

    rclone config update ${REMOTE_NAME} access_token ya29.c.c0Axxxx
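Assuming the remote is called `dev-gcs` (as used in the next step), the resulting section of the config file should look roughly like this:

```
[dev-gcs]
type = google cloud storage
access_token = ya29.c.c0Axxxx
```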

#### 5. Run rclone as usual

    rclone ls dev-gcs:${MY_BUCKET}/

### More Info on Service Accounts

* [Official GCS Docs](https://cloud.google.com/compute/docs/access/service-accounts)
* [Guide on Service Accounts using Key Files (less secure, but similar concepts)](https://forum.rclone.org/t/access-using-google-service-account/24822/2)

### Anonymous Access

For downloads of objects that permit public access you can configure rclone

|
|||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --gcs-access-token
|
||||
|
||||
Short-lived access token.
|
||||
|
||||
Leave blank normally.
|
||||
Needed only if you want use short-lived access tokens instead of interactive login.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: access_token
|
||||
- Env Var: RCLONE_GCS_ACCESS_TOKEN
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --gcs-anonymous
|
||||
|
||||
Access public buckets and objects without credentials.
|
||||
|
|
|
@@ -502,12 +502,18 @@ is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115)

**The current google API does not allow photos to be downloaded at original resolution. This is very important if you are, for example, relying on "Google Photos" as a backup of your photos. You will not be able to use rclone to redownload original images. You could use 'google takeout' to recover the original photos as a last resort**

**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.

### Downloading Videos

When videos are downloaded they are downloaded in a heavily compressed
version of the video compared to downloading it via the Google Photos
web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).

**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.

### Duplicates

If a file name is duplicated in a directory then rclone will add the

docs/content/iclouddrive.md (new file, 159 lines)

@@ -0,0 +1,159 @@
---
title: "iCloud Drive"
description: "Rclone docs for iCloud Drive"
versionIntroduced: "v1.69"
---

# {{< icon "fa fa-cloud" >}} iCloud Drive

## Configuration

The initial setup for an iCloud Drive backend involves getting a trust token/session. This can be done by simply using the regular iCloud password, and accepting the code prompt on another iCloud connected device.

`IMPORTANT: At the moment an app specific password won't be accepted. Only use your regular password and 2FA.`

`rclone config` walks you through the token creation. The trust token is valid for 30 days, after which you will have to reauthenticate with `rclone reconnect` or `rclone config`.
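For example, for a remote named `iclouddrive` (as configured below):

    rclone config reconnect iclouddrive: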

Here is an example of how to make a remote called `iclouddrive`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> iclouddrive
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / iCloud Drive
   \ (iclouddrive)
[snip]
Storage> iclouddrive
Option apple_id.
Apple ID.
Enter a value.
apple_id> APPLEID
Option password.
Password.
Choose an alternative below.
y) Yes, type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Option config_2fa.
Two-factor authentication: please enter your 2FA code
Enter a value.
config_2fa> 2FACODE
Remote config
--------------------
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
- cookies: ****************************
- trust_token: ****************************
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

## Advanced Data Protection

ADP is currently unsupported and needs to be disabled.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to iclouddrive (iCloud Drive).

#### --iclouddrive-apple-id

Apple ID.

Properties:

- Config: apple_id
- Env Var: RCLONE_ICLOUDDRIVE_APPLE_ID
- Type: string
- Required: true

#### --iclouddrive-password

Password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: password
- Env Var: RCLONE_ICLOUDDRIVE_PASSWORD
- Type: string
- Required: true

#### --iclouddrive-trust-token

trust token (internal use)

Properties:

- Config: trust_token
- Env Var: RCLONE_ICLOUDDRIVE_TRUST_TOKEN
- Type: string
- Required: false

#### --iclouddrive-cookies

cookies (internal use only)

Properties:

- Config: cookies
- Env Var: RCLONE_ICLOUDDRIVE_COOKIES
- Type: string
- Required: false

### Advanced options

Here are the Advanced options specific to iclouddrive (iCloud Drive).

#### --iclouddrive-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_ICLOUDDRIVE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot

#### --iclouddrive-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_ICLOUDDRIVE_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}
|
@ -33,6 +33,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||
| HDFS | - | R/W | No | No | - | - |
|
||||
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
|
||||
| HTTP | - | R | No | No | R | - |
|
||||
| iCloud Drive | - | R | No | No | - | - |
|
||||
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
|
||||
| Jottacloud | MD5 | R/W | Yes | No | R | RW |
|
||||
| Koofr | MD5 | - | Yes | No | - | - |
|
||||
|
@ -505,12 +506,13 @@ upon backend-specific capabilities.
|
|||
| Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
|
||||
| FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes |
|
||||
| Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
|
||||
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No | No | No | No |
|
||||
| Google Cloud Storage | Yes | Yes | No | No | No | No | Yes | No | No | No | No |
|
||||
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
|
||||
| Google Photos | No | No | No | No | No | No | No | No | No | No | No |
|
||||
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
|
||||
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
|
||||
| HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
|
||||
| iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
|
||||
| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes |
|
||||
| Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
|
||||
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
|
||||
|
@ -521,7 +523,7 @@ upon backend-specific capabilities.
|
|||
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
|
||||
| Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
|
||||
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
|
||||
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
|
||||
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
|
||||
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
|
||||
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
|
||||
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
|
||||
|
|
|
@@ -111,29 +111,68 @@ Properties:

Here are the Advanced options specific to pikpak (PikPak).

#### --pikpak-device-id
#### --pikpak-client-id

Device ID used for authorization.
OAuth Client Id.

Leave blank normally.

Properties:

- Config: device_id
- Env Var: RCLONE_PIKPAK_DEVICE_ID
- Config: client_id
- Env Var: RCLONE_PIKPAK_CLIENT_ID
- Type: string
- Required: false

#### --pikpak-user-agent
#### --pikpak-client-secret

HTTP user agent for pikpak.
OAuth Client Secret.

Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on command line.
Leave blank normally.

Properties:

- Config: user_agent
- Env Var: RCLONE_PIKPAK_USER_AGENT
- Config: client_secret
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
- Type: string
- Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
- Required: false

#### --pikpak-token

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_PIKPAK_TOKEN
- Type: string
- Required: false

#### --pikpak-auth-url

Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_PIKPAK_AUTH_URL
- Type: string
- Required: false

#### --pikpak-token-url

Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_PIKPAK_TOKEN_URL
- Type: string
- Required: false

#### --pikpak-root-folder-id

@@ -27,6 +27,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Linode Object Storage" home="https://www.linode.com/products/object-storage/" config="/s3/#linode" >}}
{{< provider name="Magalu Object Storage" home="https://magalu.cloud/object-storage/" config="/s3/#magalu" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}

@@ -34,6 +35,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/s3/#storj" >}}
{{< provider name="Synology C2 Object Storage" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}

@@ -2294,6 +2296,21 @@ You can also do this entirely on the command line

This is the provider used as main example and described in the [configuration](#configuration) section above.

### AWS Directory Buckets

From rclone v1.69 [Directory Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
are supported.

You will need to set the `directory_buckets = true` config parameter
or use `--s3-directory-buckets`.

Note that rclone cannot yet:

- Create directory buckets
- List directory buckets

See [the --s3-directory-buckets flag](#s3-directory-buckets) for more info
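A sketch of a remote with directory buckets enabled (the remote name and region are placeholders):

```
[s3db]
type = s3
provider = AWS
region = us-east-1
directory_buckets = true
```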

### AWS Snowball Edge

[AWS Snowball](https://aws.amazon.com/snowball/) is a hardware

@@ -2486,6 +2503,9 @@ Note that Cloudflare decompresses files uploaded with
does. If this is causing a problem then upload the files with
`--header-upload "Cache-Control: no-transform"`

A consequence of this is that `Content-Encoding: gzip` will never
appear in the metadata on Cloudflare.

### Dreamhost

Dreamhost [DreamObjects](https://www.dreamhost.com/cloud/storage/) is

|
|||
rclone copy /path/to/files minio:bucket
|
||||
```
|
||||
|
||||
### Outscale
|
||||
|
||||
[OUTSCALE Object Storage (OOS)](https://en.outscale.com/storage/outscale-object-storage/) is an enterprise-grade, S3-compatible storage service provided by OUTSCALE, a brand of Dassault Systèmes. For more information about OOS, see the [official documentation](https://docs.outscale.com/en/userguide/OUTSCALE-Object-Storage-OOS.html).
|
||||
|
||||
Here is an example of an OOS configuration that you can paste into your rclone configuration file:
|
||||
|
||||
```
|
||||
[outscale]
|
||||
type = s3
|
||||
provider = Outscale
|
||||
env_auth = false
|
||||
access_key_id = ABCDEFGHIJ0123456789
|
||||
secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
region = eu-west-2
|
||||
endpoint = oos.eu-west-2.outscale.com
|
||||
acl = private
|
||||
```
|
||||
|
||||
You can also run `rclone config` to go through the interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
```
|
||||
|
||||
```
|
||||
Enter name for new remote.
|
||||
name> outscale
|
||||
```
|
||||
|
||||
```
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
X / Amazon S3 Compliant Storage Providers including AWS, ...Outscale, ...and others
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> outscale
|
||||
```
|
||||
|
||||
```
|
||||
Option provider.
|
||||
Choose your S3 provider.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
[snip]
|
||||
XX / OUTSCALE Object Storage (OOS)
|
||||
\ (Outscale)
|
||||
[snip]
|
||||
provider> Outscale
|
||||
```
|
||||
|
||||
```
|
||||
Option env_auth.
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own boolean value (true or false).
|
||||
Press Enter for the default (false).
|
||||
1 / Enter AWS credentials in the next step.
|
||||
\ (false)
|
||||
2 / Get AWS credentials from the environment (env vars or IAM).
|
||||
\ (true)
|
||||
env_auth>
|
||||
```
|
||||
|
||||
```
|
||||
Option access_key_id.
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
access_key_id> ABCDEFGHIJ0123456789
|
||||
```
|
||||
|
||||
```
|
||||
Option secret_access_key.
|
||||
AWS Secret Access Key (password).
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
secret_access_key> XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
```
|
||||
|
||||
```
|
||||
Option region.
|
||||
Region where your bucket will be created and your data stored.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Paris, France
|
||||
\ (eu-west-2)
|
||||
2 / New Jersey, USA
|
||||
\ (us-east-2)
|
||||
3 / California, USA
|
||||
\ (us-west-1)
|
||||
4 / SecNumCloud, Paris, France
|
||||
\ (cloudgouv-eu-west-1)
|
||||
5 / Tokyo, Japan
|
||||
\ (ap-northeast-1)
|
||||
region> 1
|
||||
```
|
||||
|
||||
```
|
||||
Option endpoint.
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Outscale EU West 2 (Paris)
|
||||
\ (oos.eu-west-2.outscale.com)
|
||||
2 / Outscale US east 2 (New Jersey)
|
||||
\ (oos.us-east-2.outscale.com)
|
||||
3 / Outscale EU West 1 (California)
|
||||
\ (oos.us-west-1.outscale.com)
|
||||
4 / Outscale SecNumCloud (Paris)
|
||||
\ (oos.cloudgouv-eu-west-1.outscale.com)
|
||||
5 / Outscale AP Northeast 1 (Japan)
|
||||
\ (oos.ap-northeast-1.outscale.com)
|
||||
endpoint> 1
|
||||
```
|
||||
|
||||
```
|
||||
Option acl.
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
/ Owner gets FULL_CONTROL.
|
||||
1 | No one else has access rights (default).
|
||||
\ (private)
|
||||
[snip]
|
||||
acl> 1
|
||||
```
|
||||
|
||||
```
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
```
|
||||
|
||||
```
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: Outscale
|
||||
- access_key_id: ABCDEFGHIJ0123456789
|
||||
- secret_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
- endpoint: oos.eu-west-2.outscale.com
|
||||
Keep this "outscale" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
### Qiniu Cloud Object Storage (Kodo) {#qiniu}
|
||||
|
||||
[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo), a completely independent-researched core technology which is proven by repeated customer experience has occupied absolute leading market leader position. Kodo can be widely applied to mass data management.
|
||||
|
@@ -3672,6 +3854,125 @@ So once set up, for example to copy files into a bucket
rclone copy /path/to/files seaweedfs_s3:foo
```

### Selectel

[Selectel Cloud Storage](https://selectel.ru/services/cloud/storage/)
is an S3 compatible storage system which features triple redundancy
storage, automatic scaling, high availability and a comprehensive IAM
system.

Selectel have a section on their website for [configuring
rclone](https://docs.selectel.ru/en/cloud/object-storage/tools/rclone/)
which shows how to make the right API keys.

From rclone v1.69 Selectel is a supported operator - please choose the
`Selectel` provider type.

Note that you should use "vHosted" access for the buckets (which is
the recommended default), not "path style".

You can use `rclone config` to make a new provider like this

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> selectel

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including ..., Selectel, ...
   \ (s3)
[snip]
Storage> s3

Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / Selectel Object Storage
   \ (Selectel)
[snip]
provider> Selectel

Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
 1 / Enter AWS credentials in the next step.
   \ (false)
 2 / Get AWS credentials from the environment (env vars or IAM).
   \ (true)
env_auth> 1

Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> ACCESS_KEY

Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> SECRET_ACCESS_KEY

Option region.
Region where your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / St. Petersburg
   \ (ru-1)
region> 1

Option endpoint.
Endpoint for Selectel Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Saint Petersburg
   \ (s3.ru-1.storage.selcloud.ru)
endpoint> 1

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: s3
- provider: Selectel
- access_key_id: ACCESS_KEY
- secret_access_key: SECRET_ACCESS_KEY
- region: ru-1
- endpoint: s3.ru-1.storage.selcloud.ru
Keep this "selectel" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

And your config should end up looking like this:

```
[selectel]
type = s3
provider = Selectel
access_key_id = ACCESS_KEY
secret_access_key = SECRET_ACCESS_KEY
region = ru-1
endpoint = s3.ru-1.storage.selcloud.ru
```

### Wasabi

[Wasabi](https://wasabi.com) is a cloud-based object storage service for a

@@ -156,7 +156,7 @@ and the public key built into it will be used during the authentication process.
If you have a certificate you may use it to sign your public key, creating a
separate SSH user certificate that should be used instead of the plain public key
extracted from the private key. Then you must provide the path to the
user certificate public key file in `pubkey_file`.
user certificate public key file in `pubkey_file` or the content of the file in `pubkey`.

Note: This is not the traditional public key paired with your private key,
typically saved as `/home/$USER/.ssh/id_rsa.pub`. Setting this path in

@@ -318,29 +318,30 @@ is able to use checksumming if the same login has shell access,
and can execute remote commands. If there is a command that can
calculate compatible checksums on the remote system, Rclone can
then be configured to execute this whenever a checksum is needed,
and read back the results. Currently MD5 and SHA-1 are supported.
and read back the results. By default MD5 and SHA-1 are considered,
but CRC32, SHA-256, BLAKE3, XXH3 and XXH128 are also supported;
the `hashes` option can be set to specify which to consider.

Normally this requires an external utility being available on
the server. By default rclone will try commands `md5sum`, `md5`
and `rclone md5sum` for MD5 checksums, and the first one found usable
will be picked. Same with `sha1sum`, `sha1` and `rclone sha1sum`
commands for SHA-1 checksums. These utilities normally need to
be in the remote's PATH to be found.
the server. E.g. for MD5 checksums, by default rclone will try commands
`md5sum`, `md5` and `rclone md5sum`, and the first one found
usable will be picked. These utilities normally need to be in the
remote's PATH to be found.

In some cases the shell itself is capable of calculating checksums.
PowerShell is an example of such a shell. If rclone detects that the
remote shell is PowerShell, which means it most probably is a
Windows OpenSSH server, rclone will use a predefined script block
to produce the checksums when no external checksum commands are found
(see [shell access](#shell-access)). This assumes PowerShell version
4.0 or newer.
to produce the checksums for MD5, SHA-1 and SHA-256 when no external
checksum commands are found (see [shell access](#shell-access)). This
assumes PowerShell version 4.0 or newer.

The options `md5sum_command` and `sha1_command` can be used to customize
the command to be executed for calculation of checksums. You can for
example set a specific path to where md5sum and sha1sum executables
are located, or use them to specify some other tools that print checksums
in compatible format. The value can include command-line arguments,
or even shell script blocks as with PowerShell. Rclone has subcommands
The options `md5sum_command`, `sha1_command`, etc. can be used to customize
the commands to be executed for calculation of checksums. You can for
example set a specific path to where the md5sum executable is located,
or specify some other tool that prints checksums in a compatible format.
The value can include command-line arguments, or even shell script blocks
as with PowerShell. Rclone has subcommands [hashsum](/commands/rclone_hashsum/),
[md5sum](/commands/rclone_md5sum/) and [sha1sum](/commands/rclone_sha1sum/)
that use compatible format, which means if you have an rclone executable
on the server it can be used. As mentioned above, they will be automatically

@@ -356,11 +357,14 @@ configuration, so next time it will use the same. Value `none`
will be set if none of the default commands could be used for a specific
algorithm, and this algorithm will not be supported by the remote.

Disabling the checksumming may be required if you are connecting to SFTP servers
which are not under your control, and to which the execution of remote shell
commands is prohibited. Set the configuration option `disable_hashcheck`
to `true` to disable checksumming entirely, or set `shell_type` to `none`
to disable all functionality based on remote shell command execution.
Disabling the checksumming completely may be required if you are connecting to
SFTP servers which are not under your control, and to which the execution of
remote shell commands is prohibited. Set the configuration option `disable_hashcheck`
to `true` to disable checksumming entirely (you get the same effect by setting
option `hashes` to `none`, or options `md5sum_command`, `sha1_command` etc.
to `none`). Set option `shell_type` to `none` to not only disable checksumming
but also disable all other functionality that is based on remote shell command
execution.
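As an illustrative sketch, overriding the checksum commands for a remote might look like this (the host and binary path are placeholders):

```
[myserver]
type = sftp
host = example.com
md5sum_command = /opt/bin/md5sum
sha1_command = none
```

Here MD5 sums come from a custom binary path while SHA-1 is disabled for this remote.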

### Modification times and hashes

@@ -494,6 +498,19 @@ Properties:
- Type: string
- Required: false

#### --sftp-pubkey

SSH public certificate for public certificate based authentication.
Set this if you have a signed certificate you want to use for authentication.
If specified it will override `pubkey_file`.

Properties:

- Config: pubkey
- Env Var: RCLONE_SFTP_PUBKEY
- Type: string
- Required: false
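A hypothetical example of supplying the signed certificate content directly via `pubkey` (the key material is elided):

```
[mysftp]
type = sftp
host = example.com
user = me
key_file = ~/.ssh/id_ed25519
pubkey = ssh-ed25519-cert-v01@openssh.com AAAA... me@example.com
```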

#### --sftp-pubkey-file

Optional path to public key file.

@@ -224,6 +224,17 @@ Properties:
- Type: string
- Required: false

#### --zoho-upload-cutoff

Cutoff for switching to large file upload api (>= 10 MiB).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_ZOHO_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 10Mi

#### --zoho-encoding

The encoding for the backend.

@@ -11,7 +11,9 @@
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title>
<link rel="canonical" href="{{ .Permalink }}">
<link href="/css/bootstrap.min.4.4.1.css" rel="stylesheet">
<link href="/css/font-awesome.min.5.10.2.css" rel="stylesheet">
<link href="/fontawesome/css/fontawesome.min.css" rel="stylesheet">
<link href="/fontawesome/css/brands.min.css" rel="stylesheet">
<link href="/fontawesome/css/solid.min.css" rel="stylesheet">
<link href="/css/custom.css?r={{ .Date.Unix }}" rel="stylesheet">
{{ $RSSLink := "" }}{{ with .OutputFormats.Get "RSS" }}{{ $RSSLink = .RelPermalink }}{{ end }}{{ if $RSSLink }}<link href="{{ $RSSLink }}" rel="alternate" type="application/rss+xml" title="{{ .Site.Title }}" />{{ end }}
</head>

@@ -65,7 +65,7 @@
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
-<a class="dropdown-item" href="/filescom/"><i class="fa fa-file-alt fa-fw"></i> Files.com</a>
+<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
<a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google fa-fw"></i> Google Cloud Storage</a>

@@ -75,6 +75,7 @@
<a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe fa-fw"></i> HDFS (Hadoop Distributed Filesystem)</a>
<a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud fa-fw"></i> HiDrive</a>
<a class="dropdown-item" href="/http/"><i class="fa fa-globe fa-fw"></i> HTTP</a>
+<a class="dropdown-item" href="/iclouddrive/"><i class="fa fa-archive fa-fw"></i> iCloud Drive</a>
<a class="dropdown-item" href="/imagekit/"><i class="fa fa-cloud fa-fw"></i> ImageKit</a>
<a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive fa-fw"></i> Internet Archive</a>
<a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud fa-fw"></i> Jottacloud</a>

@@ -1 +1 @@
-v1.68.2
+v1.69.0

5     docs/static/css/font-awesome.min.5.10.2.css (vendored) - File diff suppressed because one or more lines are too long
6     docs/static/fontawesome/css/brands.min.css (vendored, new file) - File diff suppressed because one or more lines are too long
9     docs/static/fontawesome/css/fontawesome.min.css (vendored, new file) - File diff suppressed because one or more lines are too long
6     docs/static/fontawesome/css/solid.min.css (vendored, new file)
@@ -0,0 +1,6 @@
+/*!
+ * Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com
+ * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
+ * Copyright 2024 Fonticons, Inc.
+ */
+:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-weight:900}

BIN   docs/static/fontawesome/webfonts/fa-brands-400.ttf (vendored, new file) - Binary file not shown.
BIN   docs/static/fontawesome/webfonts/fa-brands-400.woff2 (vendored, new file) - Binary file not shown.
BIN   docs/static/fontawesome/webfonts/fa-solid-900.ttf (vendored, new file) - Binary file not shown.
BIN   docs/static/fontawesome/webfonts/fa-solid-900.woff2 (vendored, new file) - Binary file not shown.
BIN   docs/static/fontawesome/webfonts/fa-v4compatibility.ttf (vendored, new file) - Binary file not shown.
BIN   docs/static/fontawesome/webfonts/fa-v4compatibility.woff2 (vendored, new file) - Binary file not shown.
BIN   docs/static/webfonts/fa-brands-400.eot (vendored) - Binary file not shown.
3449  docs/static/webfonts/fa-brands-400.svg (vendored) - File diff suppressed because it is too large. (Before size: 675 KiB)
Some files were not shown because too many files have changed in this diff.