Compare commits: tcl/master...v1.56-stab (28 commits)

Commits (SHA1):
4dfcf6899d
81b3075e9d
f988773230
e75d57d638
0e26fda001
86f13fa46b
b9cf451177
b64258d92e
7308100695
9c27e080b3
21d84df81b
3f17c729f1
d77f594ee7
478434ffef
df52896a0e
29a99205ec
693f674f39
a506373ca1
a8ba15b90a
b26308c427
03bcf81c5e
8c1d4f17a8
e87de7c7e3
7a31ef783a
d0de426500
34f89043af
3ba001f8d7
6bbf46961c
209 changed files with 934 additions and 161 deletions
.github/workflows/build.yml (vendored, 14 changed lines)
@@ -241,14 +241,14 @@ jobs:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.14
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.14
go-version: 1.16
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2

@@ -279,7 +279,7 @@ jobs:
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV

@@ -292,7 +292,7 @@ jobs:
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV

@@ -305,7 +305,7 @@ jobs:
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV

@@ -318,7 +318,7 @@ jobs:
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
MANUAL.html (generated, 89 changed lines)
@@ -17,7 +17,7 @@
<header id="title-block-header">
<h1 class="title">rclone(1) User Manual</h1>
<p class="author">Nick Craig-Wood</p>
<p class="date">Jul 20, 2021</p>
<p class="date">Oct 01, 2021</p>
</header>
<h1 id="rclone-syncs-your-files-to-cloud-storage">Rclone syncs your files to cloud storage</h1>
<p><img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" ></p>

@@ -7243,7 +7243,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.2")
-v, --verbose count Print lots more stuff (repeat for more)</code></pre>
<h2 id="backend-flags">Backend Flags</h2>
<p>These flags are available for every command. They control the backends and may be set in the config file.</p>

@@ -8500,7 +8500,7 @@ e) Edit this remote
d) Delete this remote
y/e/d> </code></pre>
<h3 id="modified-time">Modified time</h3>
<p>The modified time is stored as metadata on the object as <code>X-Amz-Meta-Mtime</code> as floating point since the epoch accurate to 1 ns.</p>
<p>The modified time is stored as metadata on the object as <code>X-Amz-Meta-Mtime</code> as floating point since the epoch, accurate to 1 ns.</p>
<p>If the modification time needs to be updated rclone will attempt to perform a server side copy to update the modification if the object can be copied in a single part. In the case the object is larger than 5Gb or is in Glacier or Glacier Deep Archive storage the object will be uploaded rather than copied.</p>
<p>Note that reading this from the object takes an additional <code>HEAD</code> request as the metadata isn't returned in object listings.</p>
<h3 id="reducing-costs">Reducing costs</h3>

@@ -9520,6 +9520,10 @@ y/e/d> </code></pre>
<ul>
<li>Wasabi EU Central endpoint</li>
</ul></li>
<li>"s3.ap-northeast-1.wasabisys.com"
<ul>
<li>Wasabi AP Northeast endpoint</li>
</ul></li>
</ul></li>
</ul>
<h4 id="s3-location-constraint">--s3-location-constraint</h4>

@@ -14409,7 +14413,7 @@ client_secret> # Can be left blank
scope> # Select your scope, 1 for example
root_folder_id> # Can be left blank
service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes!
y/n> # Auto config, y
y/n> # Auto config, n
</code></pre>
<h5 id="verify-that-its-working">4. Verify that it's working</h5>
<ul>

@@ -14422,7 +14426,7 @@ y/n> # Auto config, y
<li><code>gdrive:backup</code> - use the remote called gdrive, work in the folder named backup.</li>
</ul></li>
</ul>
<p>Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using <code>--drive-impersonate</code>, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1 - use rclone without specifying the <code>--drive-impersonate</code> option, like this: <code>rclone -v foo@example.com lsf gdrive:backup</code></p>
<p>Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using <code>--drive-impersonate</code>, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1 - use rclone without specifying the <code>--drive-impersonate</code> option, like this: <code>rclone -v lsf gdrive:backup</code></p>
<h3 id="shared-drives-team-drives">Shared drives (team drives)</h3>
<p>If you want to configure the remote to point to a Google Shared Drive (previously known as Team Drives) then answer <code>y</code> to the question <code>Configure this as a Shared Drive (Team Drive)?</code>.</p>
<p>This will fetch the list of Shared Drives from google and allow you to configure which one you want to use. You can also type in a Shared Drive ID if you prefer.</p>

@@ -17187,7 +17191,7 @@ y/e/d> y</code></pre>
<ol type="1">
<li>Open https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade and then click <code>New registration</code>.</li>
<li>Enter a name for your app, choose account type <code>Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)</code>, select <code>Web</code> in <code>Redirect URI</code>, then type (do not copy and paste) <code>http://localhost:53682/</code> and click Register. Copy and keep the <code>Application (client) ID</code> under the app name for later use.</li>
<li>Under <code>manage</code> select <code>Certificates & secrets</code>, click <code>New client secret</code>. Copy and keep that secret for later use.</li>
<li>Under <code>manage</code> select <code>Certificates & secrets</code>, click <code>New client secret</code>. Enter a description (can be anything) and set <code>Expires</code> to 24 months. Copy and keep that secret <em>Value</em> for later use (you <em>won't</em> be able to see this value afterwards).</li>
<li>Under <code>manage</code> select <code>API permissions</code>, click <code>Add a permission</code> and select <code>Microsoft Graph</code> then select <code>delegated permissions</code>.</li>
<li>Search and select the following permissions: <code>Files.Read</code>, <code>Files.ReadWrite</code>, <code>Files.Read.All</code>, <code>Files.ReadWrite.All</code>, <code>offline_access</code>, <code>User.Read</code>. Once selected click <code>Add permissions</code> at the bottom.</li>
</ol>

@@ -17557,6 +17561,8 @@ Description: Using application 'rclone' is currently not supported for y
Code: AADSTS50076
Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.</code></pre>
<p>If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run <code>rclone config</code>, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: <code>Already have a token - refresh?</code>. For this question, answer <code>y</code> and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.</p>
<h4 id="invalid-request-when-making-public-links">Invalid request when making public links</h4>
<p>On Sharepoint and OneDrive for Business, <code>rclone link</code> may return an "Invalid request" error. A possible cause is that the organisation admin didn't allow public links to be made for the organisation/sharepoint library. To fix the permissions as an admin, take a look at the docs: <a href="https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off">1</a>, <a href="https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3">2</a>.</p>
<h1 id="opendrive">OpenDrive</h1>
<p>Paths are specified as <code>remote:path</code></p>
<p>Paths may be as deep as required, e.g. <code>remote:directory/subdirectory</code>.</p>

@@ -21308,6 +21314,77 @@ $ tree /tmp/b
<li>"error": return an error based on option value</li>
</ul>
<h1 id="changelog">Changelog</h1>
<h2 id="v1.56.2---2021-10-01">v1.56.2 - 2021-10-01</h2>
<p><a href="https://github.com/rclone/rclone/compare/v1.56.1...v1.56.2">See commits</a></p>
<ul>
<li>Bug Fixes
<ul>
<li>serve http: Re-add missing auth to http service (Nolan Woods)</li>
<li>build: Update golang.org/x/sys to fix crash on macOS when compiled with go1.17 (Herby Gillot)</li>
</ul></li>
<li>FTP
<ul>
<li>Fix deadlock after failed update when concurrency=1 (Ivan Andreev)</li>
</ul></li>
</ul>
<h2 id="v1.56.1---2021-09-19">v1.56.1 - 2021-09-19</h2>
<p><a href="https://github.com/rclone/rclone/compare/v1.56.0...v1.56.1">See commits</a></p>
<ul>
<li>Bug Fixes
<ul>
<li>accounting: Fix maximum bwlimit by scaling scale max token bucket size (Nick Craig-Wood)</li>
<li>rc: Fix speed does not update in core/stats (negative0)</li>
<li>selfupdate: Fix --quiet option, not quite quiet (yedamo)</li>
<li>serve http: Fix <code>serve http</code> exiting directly after starting (Cnly)</li>
<li>build
<ul>
<li>Apply gofmt from golang 1.17 (Ivan Andreev)</li>
<li>Update Go to 1.16 and NDK to 22b for android/any (x0b)</li>
</ul></li>
</ul></li>
<li>Mount
<ul>
<li>Fix <code>--daemon</code> mode (Ivan Andreev)</li>
</ul></li>
<li>VFS
<ul>
<li>Fix duplicates on rename (Nick Craig-Wood)</li>
<li>Fix crash when truncating a just uploaded object (Nick Craig-Wood)</li>
<li>Fix issue where empty dirs would build up in cache meta dir (albertony)</li>
</ul></li>
<li>Drive
<ul>
<li>Fix instructions for auto config (Greg Sadetsky)</li>
<li>Fix lsf example without drive-impersonate (Greg Sadetsky)</li>
</ul></li>
<li>Onedrive
<ul>
<li>Handle HTTP 400 better in PublicLink (Alex Chen)</li>
<li>Clarification of the process for creating custom client_id (Mariano Absatz)</li>
</ul></li>
<li>Pcloud
<ul>
<li>Return an early error when Put is called with an unknown size (Nick Craig-Wood)</li>
<li>Try harder to delete a failed upload (Nick Craig-Wood)</li>
</ul></li>
<li>S3
<ul>
<li>Add Wasabi's AP-Northeast endpoint info (hota)</li>
<li>Fix typo in s3 documentation (Greg Sadetsky)</li>
</ul></li>
<li>Seafile
<ul>
<li>Fix 2fa config state machine (Fred)</li>
</ul></li>
<li>SFTP
<ul>
<li>Remove spurious error message on <code>--sftp-disable-concurrent-reads</code> (Nick Craig-Wood)</li>
</ul></li>
<li>Sugarsync
<ul>
<li>Fix initial connection after config re-arrangement (Nick Craig-Wood)</li>
</ul></li>
</ul>
<h2 id="v1.56.0---2021-07-20">v1.56.0 - 2021-07-20</h2>
<p><a href="https://github.com/rclone/rclone/compare/v1.55.0...v1.56.0">See commits</a></p>
<ul>
MANUAL.md (generated, 70 changed lines)
@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 20, 2021
% Oct 01, 2021

# Rclone syncs your files to cloud storage

@@ -12650,7 +12650,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.2")
-v, --verbose count Print lots more stuff (repeat for more)
```

@@ -14452,7 +14452,7 @@ y/e/d>
### Modified time ###
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
`X-Amz-Meta-Mtime` as floating point since the epoch, accurate to 1 ns.
If the modification time needs to be updated rclone will attempt to perform a server
side copy to update the modification if the object can be copied in a single part.

@@ -15249,6 +15249,8 @@ Required when using an S3 clone.
    - Wasabi US West endpoint
- "s3.eu-central-1.wasabisys.com"
    - Wasabi EU Central endpoint
- "s3.ap-northeast-1.wasabisys.com"
    - Wasabi AP Northeast endpoint

#### --s3-location-constraint

@@ -21789,7 +21791,7 @@ client_secret> # Can be left blank
scope> # Select your scope, 1 for example
root_folder_id> # Can be left blank
service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes!
y/n> # Auto config, y
y/n> # Auto config, n
```

@@ -21806,7 +21808,7 @@ the folder named backup.
Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using `--drive-impersonate`, do this instead:
- in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1
- use rclone without specifying the `--drive-impersonate` option, like this:
`rclone -v foo@example.com lsf gdrive:backup`
`rclone -v lsf gdrive:backup`

### Shared drives (team drives) ###

@@ -25525,7 +25527,7 @@ Client ID and Key by following the steps below:
1. Open https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade and then click `New registration`.
2. Enter a name for your app, choose account type `Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)`, select `Web` in `Redirect URI`, then type (do not copy and paste) `http://localhost:53682/` and click Register. Copy and keep the `Application (client) ID` under the app name for later use.
3. Under `manage` select `Certificates & secrets`, click `New client secret`. Copy and keep that secret for later use.
3. Under `manage` select `Certificates & secrets`, click `New client secret`. Enter a description (can be anything) and set `Expires` to 24 months. Copy and keep that secret _Value_ for later use (you _won't_ be able to see this value afterwards).
4. Under `manage` select `API permissions`, click `Add a permission` and select `Microsoft Graph` then select `delegated permissions`.
5. Search and select the following permissions: `Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`. Once selected click `Add permissions` at the bottom.

@@ -25980,6 +25982,15 @@ Description: Due to a configuration change made by your administrator, or becaus
If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.

#### Invalid request when making public links ####

On Sharepoint and OneDrive for Business, `rclone link` may return an "Invalid
request" error. A possible cause is that the organisation admin didn't allow
public links to be made for the organisation/sharepoint library. To fix the
permissions as an admin, take a look at the docs:
[1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off),
[2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3).

# OpenDrive

Paths are specified as `remote:path`

@@ -30700,6 +30711,53 @@ Options:

# Changelog

## v1.56.2 - 2021-10-01

[See commits](https://github.com/rclone/rclone/compare/v1.56.1...v1.56.2)

* Bug Fixes
    * serve http: Re-add missing auth to http service (Nolan Woods)
    * build: Update golang.org/x/sys to fix crash on macOS when compiled with go1.17 (Herby Gillot)
* FTP
    * Fix deadlock after failed update when concurrency=1 (Ivan Andreev)

## v1.56.1 - 2021-09-19

[See commits](https://github.com/rclone/rclone/compare/v1.56.0...v1.56.1)

* Bug Fixes
    * accounting: Fix maximum bwlimit by scaling scale max token bucket size (Nick Craig-Wood)
    * rc: Fix speed does not update in core/stats (negative0)
    * selfupdate: Fix --quiet option, not quite quiet (yedamo)
    * serve http: Fix `serve http` exiting directly after starting (Cnly)
    * build
        * Apply gofmt from golang 1.17 (Ivan Andreev)
        * Update Go to 1.16 and NDK to 22b for android/any (x0b)
* Mount
    * Fix `--daemon` mode (Ivan Andreev)
* VFS
    * Fix duplicates on rename (Nick Craig-Wood)
    * Fix crash when truncating a just uploaded object (Nick Craig-Wood)
    * Fix issue where empty dirs would build up in cache meta dir (albertony)
* Drive
    * Fix instructions for auto config (Greg Sadetsky)
    * Fix lsf example without drive-impersonate (Greg Sadetsky)
* Onedrive
    * Handle HTTP 400 better in PublicLink (Alex Chen)
    * Clarification of the process for creating custom client_id (Mariano Absatz)
* Pcloud
    * Return an early error when Put is called with an unknown size (Nick Craig-Wood)
    * Try harder to delete a failed upload (Nick Craig-Wood)
* S3
    * Add Wasabi's AP-Northeast endpoint info (hota)
    * Fix typo in s3 documentation (Greg Sadetsky)
* Seafile
    * Fix 2fa config state machine (Fred)
* SFTP
    * Remove spurious error message on `--sftp-disable-concurrent-reads` (Nick Craig-Wood)
* Sugarsync
    * Fix initial connection after config re-arrangement (Nick Craig-Wood)

## v1.56.0 - 2021-07-20

[See commits](https://github.com/rclone/rclone/compare/v1.55.0...v1.56.0)
MANUAL.txt (generated, 84 changed lines)
@@ -1,6 +1,6 @@
rclone(1) User Manual
Nick Craig-Wood
Jul 20, 2021
Oct 01, 2021

@@ -12805,7 +12805,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.2")
-v, --verbose count Print lots more stuff (repeat for more)

@@ -14561,7 +14561,7 @@ This will guide you through an interactive setup process.
Modified time
The modified time is stored as metadata on the object as
X-Amz-Meta-Mtime as floating point since the epoch accurate to 1 ns.
X-Amz-Meta-Mtime as floating point since the epoch, accurate to 1 ns.
If the modification time needs to be updated rclone will attempt to
perform a server side copy to update the modification if the object can

@@ -15362,6 +15362,8 @@ Endpoint for S3 API. Required when using an S3 clone.
- Wasabi US West endpoint
- "s3.eu-central-1.wasabisys.com"
- Wasabi EU Central endpoint
- "s3.ap-northeast-1.wasabisys.com"
- Wasabi AP Northeast endpoint

--s3-location-constraint

@@ -21842,7 +21844,7 @@ There's a few steps we need to go through to accomplish this:
scope> # Select your scope, 1 for example
root_folder_id> # Can be left blank
service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes!
y/n> # Auto config, y
y/n> # Auto config, n
4. Verify that it's working

@@ -21860,8 +21862,7 @@ is unable to access the contents of that folder when using
--drive-impersonate, do this instead: - in the gdrive web interface,
share your root folder with the user/email of the new Service Account
you created/selected at step #1 - use rclone without specifying the
--drive-impersonate option, like this:
rclone -v foo@example.com lsf gdrive:backup
--drive-impersonate option, like this: rclone -v lsf gdrive:backup

Shared drives (team drives)

@@ -25623,7 +25624,9 @@ you can get your own Client ID and Key by following the steps below:
http://localhost:53682/ and click Register. Copy and keep the
Application (client) ID under the app name for later use.
3. Under manage select Certificates & secrets, click New client secret.
Copy and keep that secret for later use.
Enter a description (can be anything) and set Expires to 24 months.
Copy and keep that secret _Value_ for later use (you _won't_ be able
to see this value afterwards).
4. Under manage select API permissions, click Add a permission and
select Microsoft Graph then select delegated permissions.
5. Search and select the following permissions: Files.Read,

@@ -26099,6 +26102,14 @@ and go through the process to refresh your token, just like the first
time the backend is configured. After this, rclone should work again for
this backend.

Invalid request when making public links

On Sharepoint and OneDrive for Business, rclone link may return an
"Invalid request" error. A possible cause is that the organisation admin
didn't allow public links to be made for the organisation/sharepoint
library. To fix the permissions as an admin, take a look at the docs: 1,
2.

OPENDRIVE

@@ -30844,6 +30855,65 @@ Options:
CHANGELOG

v1.56.2 - 2021-10-01

See commits

- Bug Fixes
- serve http: Re-add missing auth to http service (Nolan Woods)
- build: Update golang.org/x/sys to fix crash on macOS when
compiled with go1.17 (Herby Gillot)
- FTP
- Fix deadlock after failed update when concurrency=1 (Ivan
Andreev)

v1.56.1 - 2021-09-19

See commits

- Bug Fixes
- accounting: Fix maximum bwlimit by scaling scale max token
bucket size (Nick Craig-Wood)
- rc: Fix speed does not update in core/stats (negative0)
- selfupdate: Fix --quiet option, not quite quiet (yedamo)
- serve http: Fix serve http exiting directly after starting
(Cnly)
- build
- Apply gofmt from golang 1.17 (Ivan Andreev)
- Update Go to 1.16 and NDK to 22b for android/any (x0b)
- Mount
- Fix --daemon mode (Ivan Andreev)
- VFS
- Fix duplicates on rename (Nick Craig-Wood)
- Fix crash when truncating a just uploaded object (Nick
Craig-Wood)
- Fix issue where empty dirs would build up in cache meta dir
(albertony)
- Drive
- Fix instructions for auto config (Greg Sadetsky)
- Fix lsf example without drive-impersonate (Greg Sadetsky)
- Onedrive
- Handle HTTP 400 better in PublicLink (Alex Chen)
- Clarification of the process for creating custom client_id
(Mariano Absatz)
- Pcloud
- Return an early error when Put is called with an unknown size
(Nick Craig-Wood)
- Try harder to delete a failed upload (Nick Craig-Wood)
- S3
- Add Wasabi's AP-Northeast endpoint info (hota)
- Fix typo in s3 documentation (Greg Sadetsky)
- Seafile
- Fix 2fa config state machine (Fred)
- SFTP
- Remove spurious error message on --sftp-disable-concurrent-reads
(Nick Craig-Wood)
- Sugarsync
- Fix initial connection after config re-arrangement (Nick
Craig-Wood)

v1.56.0 - 2021-07-20

See commits
VERSION (2 changed lines)
@@ -1 +1 @@
v1.56.0
v1.56.2
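Most of the small per-file hunks from here on correspond to the changelog entry "Apply gofmt from golang 1.17": gofmt now inserts the new `//go:build` constraint line above the legacy `// +build` line and keeps the two in sync. A minimal sketch of what a touched file header ends up looking like, using a hypothetical package rather than any specific file from this diff:

```go
// Hypothetical illustration of the dual build-constraint style produced by
// Go 1.17's gofmt: the //go:build expression is the new canonical form, and
// the old // +build line is kept so older toolchains still honour it.

//go:build !plan9 && !js
// +build !plan9,!js

package example

// Placeholder so the file compiles; the build constraints above are the point.
var _ = "only built where the constraints allow"
```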
@@ -1,5 +1,6 @@
// Test AmazonCloudDrive filesystem interface
//go:build acd
// +build acd
package amazonclouddrive_test

@@ -1,5 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
//go:build !plan9 && !solaris && !js && go1.14
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,3 +1,4 @@
//go:build !plan9 && !solaris && !js && go1.14
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,5 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js && go1.14
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,6 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || !go1.14
// +build plan9 solaris js !go1.14
package azureblob

@@ -1,3 +1,4 @@
//go:build !plan9 && !solaris && !js && go1.14
// +build !plan9,!solaris,!js,go1.14
package azureblob

@@ -1,3 +1,4 @@
//go:build !plan9 && !solaris && !js && go1.14
// +build !plan9,!solaris,!js,go1.14
package azureblob
backend/cache/cache.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/cache_internal_test.go (vendored, 4 changed lines)
@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

backend/cache/cache_test.go (vendored, 4 changed lines)
@@ -1,7 +1,7 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

backend/cache/cache_unsupported.go (vendored, 1 changed line)
@@ -1,6 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cache

backend/cache/cache_upload_test.go (vendored, 4 changed lines)
@@ -1,5 +1,5 @@
// +build !plan9,!js
// +build !race
//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test

backend/cache/directory.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/handle.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/object.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/plex.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/storage_memory.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

backend/cache/storage_persistent.go (vendored, 1 changed line)
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache
|
|||
}
|
||||
if err != nil {
|
||||
_ = c.Quit() // toss this connection to avoid sync errors
|
||||
remove()
|
||||
// recycle connection in advance to let remove() find free token
|
||||
o.fs.putFtpConnection(nil, err)
|
||||
remove()
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
|
|
|
@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs

@@ -1,5 +1,6 @@
// Test HDFS filesystem interface
//go:build !plan9
// +build !plan9
package hdfs_test

@@ -1,6 +1,7 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// +build plan9
package hdfs

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package hdfs
@@ -1,3 +1,4 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux
package local

@@ -1,3 +1,4 @@
//go:build windows
// +build windows
package local

@@ -1,4 +1,5 @@
//+build darwin
//go:build darwin
// +build darwin
package local

@@ -1,4 +1,5 @@
//+build !windows,!darwin
//go:build !windows && !darwin
// +build !windows,!darwin
package local

@@ -1,4 +1,5 @@
//+build windows
//go:build windows
// +build windows
package local

@@ -1,4 +1,5 @@
//+build !linux
//go:build !linux
// +build !linux
package local

@@ -1,4 +1,5 @@
//+build linux
//go:build linux
// +build linux
package local

@@ -1,3 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local

@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

@@ -1,5 +1,6 @@
// Device reading functions
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local

@@ -1,5 +1,6 @@
// Device reading functions
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local

@@ -1,4 +1,5 @@
//+build !windows
//go:build !windows
// +build !windows
package local

@@ -1,4 +1,5 @@
//+build windows
//go:build windows
// +build windows
package local

@@ -1,3 +1,4 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package local

@@ -1,3 +1,4 @@
//go:build windows || plan9 || js
// +build windows plan9 js
package local
@@ -1500,7 +1500,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, resp, err)
})
if err != nil {
fmt.Println(err)
if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
}
return "", err
}
@@ -1092,6 +1092,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
modTime := src.ModTime(ctx)
remote := o.Remote()
if size < 0 {
return errors.New("can't upload unknown sizes objects")
}
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
if err != nil {

@@ -1154,10 +1158,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
})
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists
delObj, delErr := o.fs.NewObject(ctx, o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove(ctx)
// error, so delete it if it exists, trying a few times
for i := 0; i < 5; i++ {
delObj, delErr := o.fs.NewObject(ctx, o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove(ctx)
break
}
time.Sleep(time.Second)
}
return err
}
@@ -1,6 +1,7 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
//go:build !plan9 && !js
// +build !plan9,!js
package qingstor

@@ -1,5 +1,6 @@
// Test QingStor filesystem interface
//go:build !plan9 && !js
// +build !plan9,!js
package qingstor

@@ -1,6 +1,7 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package qingstor

@@ -1,5 +1,6 @@
// Upload object to QingStor
//go:build !plan9 && !js
// +build !plan9,!js
package qingstor
@@ -629,6 +629,10 @@ func init() {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast endpoint",
Provider: "Wasabi",
}},
}, {
Name: "location_constraint",
@@ -325,17 +325,20 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
switch config.State {
case "":
// Just make sure we do have a password
// Empty state means it's the first call to the Config function
if password == "" {
return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
return fs.ConfigPassword("password", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
}
return fs.ConfigGoto("password")
// password was successfully loaded from the config
return fs.ConfigGoto("2fa")
case "password":
// password should be coming from the previous state (entered by the user)
password = config.Result
if password == "" {
return fs.ConfigError("password", "Password can't be blank")
return fs.ConfigError("", "Password can't be blank")
}
m.Set(configPassword, obscure.MustObscure(config.Result))
// save it into the configuration file and keep going
m.Set(configPassword, obscure.MustObscure(password))
return fs.ConfigGoto("2fa")
case "2fa":
return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
@@ -1,10 +1,15 @@
package seafile
import (
"context"
"path"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type pathData struct {

@@ -19,77 +24,77 @@ type pathData struct {
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
testData := []pathData{
pathData{
{
configLibrary: "",
configRoot: "",
argumentPath: "",
expectedLibrary: "",
expectedPath: "",
},
pathData{
{
configLibrary: "",
configRoot: "",
argumentPath: "Library",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
{
configLibrary: "",
configRoot: "",
argumentPath: path.Join("Library", "path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
{
configLibrary: "Library",
configRoot: "",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
{
configLibrary: "Library",
configRoot: "",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: "path",
},
pathData{
{
configLibrary: "Library",
configRoot: "",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
{
configLibrary: "Library",
configRoot: "root",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "root",
},
pathData{
{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: "",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
{
configLibrary: "Library",
configRoot: "root",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
{
configLibrary: "Library",
configRoot: "root",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "to", "file"),
},
pathData{
{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: path.Join("subpath", "to", "file"),

@@ -121,3 +126,98 @@ func TestSplitPathIntoSlice(t *testing.T) {
assert.Equal(t, expected, output)
}
}

func Test2FAStateMachine(t *testing.T) {
fixtures := []struct {
name string
mapper configmap.Mapper
input fs.ConfigIn
expectState string
expectErrorMessage string
expectResult string
expectFail bool
}{
{
name: "no url",
mapper: configmap.Simple{},
input: fs.ConfigIn{State: ""},
expectFail: true,
},
{
name: "2fa not set",
mapper: configmap.Simple{"url": "http://localhost/"},
input: fs.ConfigIn{State: ""},
expectFail: true,
},
{
name: "unknown state",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "unknown"},
expectFail: true,
},
{
name: "no password in config",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: ""},
expectState: "password",
},
{
name: "config ready for 2fa token",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username", "pass": obscure.MustObscure("password")},
input: fs.ConfigIn{State: ""},
expectState: "2fa",
},
{
name: "password not entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "password"},
expectState: "",
expectErrorMessage: "Password can't be blank",
},
{
name: "password entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "password", Result: "password"},
expectState: "2fa",
},
{
name: "ask for a 2fa code",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa"},
expectState: "2fa_do",
},
{
name: "no 2fa code entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_do"},
expectState: "2fa", // ask for a code again
expectErrorMessage: "2FA codes can't be blank",
},
{
name: "2fa error and retry",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_error", Result: "true"},
expectState: "2fa", // ask for a code again
},
{
name: "2fa error and fail",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_error"},
expectFail: true,
},
}

for _, fixture := range fixtures {
t.Run(fixture.name, func(t *testing.T) {
output, err := Config(context.Background(), "test", fixture.mapper, fixture.input)
if fixture.expectFail {
require.Error(t, err)
t.Log(err)
return
}
assert.Equal(t, fixture.expectState, output.State)
assert.Equal(t, fixture.expectErrorMessage, output.Error)
assert.Equal(t, fixture.expectResult, output.Result)
})
}
}
@@ -1,5 +1,6 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp
//go:build !plan9
// +build !plan9
package sftp

@@ -429,10 +430,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
}
return sftp.NewClientPipe(pr, pw, opts...)
}

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

@@ -1,5 +1,6 @@
// Test Sftp filesystem interface
//go:build !plan9
// +build !plan9
package sftp_test

@@ -1,6 +1,7 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// +build plan9
package sftp

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package sftp
@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore
package main
@@ -1,5 +1,6 @@
// Code generated by vfsgen; DO NOT EDIT.
//go:build !dev
// +build !dev
package sharefile

@@ -105,7 +105,7 @@ func init() {
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Password: obscure.MustReveal(password),
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
package tardigrade

@@ -1,3 +1,4 @@
//go:build !plan9
// +build !plan9
// Test Tardigrade filesystem interface

@@ -1,3 +1,4 @@
//go:build plan9
// +build plan9
package tardigrade
@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore
// Attempt to work out if branches have already been merged

@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore
// Cross compile rclone - in go because I hate bash ;-)

@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore
// Get the latest release from a github project

@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore
// Test that the tests in the suite passed in are independent
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cachestats

@@ -1,6 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js
// +build plan9 js
package cachestats
@@ -1,3 +1,4 @@
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
// +build cmount
// +build cgo
// +build linux darwin freebsd windows

@@ -2,6 +2,7 @@
//
// This uses the cgo based cgofuse library
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
// +build cmount
// +build cgo
// +build linux darwin freebsd windows

@@ -1,8 +1,8 @@
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
// +build brew
// +build darwin
//go:build brew && darwin
// +build brew,darwin
package cmount

@@ -1,3 +1,4 @@
//go:build cmount && cgo && (linux || darwin || freebsd || windows) && (!race || !windows)
// +build cmount
// +build cgo
// +build linux darwin freebsd windows

@@ -1,6 +1,7 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build (!linux && !darwin && !freebsd && !windows) || !brew || !cgo || !cmount
// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
package cmount

@@ -1,6 +1,5 @@
// +build cmount
// +build cgo
// +build !windows
//go:build cmount && cgo && !windows
// +build cmount,cgo,!windows
package cmount

@@ -1,6 +1,5 @@
// +build cmount
// +build cgo
// +build windows
//go:build cmount && cgo && windows
// +build cmount,cgo,windows
package cmount
@@ -1,3 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -1,3 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -1,5 +1,6 @@
// FUSE main Fs
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -1,3 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -1,5 +1,6 @@
// Package mount implements a FUSE mounting system for rclone remotes.
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -1,3 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package mount

@@ -3,7 +3,7 @@
// Invert the build constraint: linux freebsd
// +build !linux
// +build !freebsd
//go:build !linux && !freebsd
// +build !linux,!freebsd
package mount
@@ -1,3 +1,4 @@
//go:build linux || (darwin && amd64)
// +build linux darwin,amd64
package mount2

@@ -1,5 +1,6 @@
// FUSE main Fs
//go:build linux || (darwin && amd64)
// +build linux darwin,amd64
package mount2

@@ -1,5 +1,6 @@
// Package mount implements a FUSE mounting system for rclone remotes.
//go:build linux || (darwin && amd64)
// +build linux darwin,amd64
package mount2

@@ -1,3 +1,4 @@
//go:build linux || (darwin && amd64)
// +build linux darwin,amd64
package mount2

@@ -1,6 +1,7 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build !linux && (!darwin || !amd64)
// +build !linux
// +build !darwin !amd64

@@ -1,3 +1,4 @@
//go:build linux || (darwin && amd64)
// +build linux darwin,amd64
package mount2
@@ -1,5 +1,6 @@
// Daemonization interface for non-Unix variants only
//go:build windows || plan9 || js
// +build windows plan9 js
package mountlib

@@ -1,5 +1,6 @@
// Daemonization interface for Unix variants only
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js
package mountlib
@@ -145,8 +145,8 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
VFSOpt: vfsflags.Opt,
}
err := mnt.Mount()
if err == nil {
daemonized, err := mnt.Mount()
if !daemonized && err == nil {
err = mnt.Wait()
}
if err != nil {

@@ -167,21 +167,21 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
}
// Mount the remote at mountpoint
func (m *MountPoint) Mount() (err error) {
func (m *MountPoint) Mount() (daemonized bool, err error) {
if err = m.CheckOverlap(); err != nil {
return err
return false, err
}
if err = m.CheckAllowings(); err != nil {
return err
return false, err
}
m.SetVolumeName(m.MountOpt.VolumeName)
// Start background task if --background is specified
// Start background task if --daemon is specified
if m.MountOpt.Daemon {
daemonized := startBackgroundMode()
daemonized = startBackgroundMode()
if daemonized {
return nil
return true, nil
}
}

@@ -189,9 +189,9 @@ func (m *MountPoint) Mount() (err error) {
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
return false, errors.Wrap(err, "failed to mount FUSE fs")
}
return nil
return false, nil
}
// CheckOverlap checks that root doesn't overlap with mountpoint
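The mountlib hunks above change MountPoint.Mount so that it also reports whether the process daemonized, and the caller now skips Wait in that case. A minimal sketch of that calling pattern (a hypothetical wrapper, assuming the github.com/rclone/rclone/cmd/mountlib import path; not code from the repository):

```go
package main

import "github.com/rclone/rclone/cmd/mountlib"

// runMount mirrors the pattern used in NewMountCommand after this change:
// Mount reports whether the process forked into the background, and only the
// foreground (non-daemonized) path should block in Wait.
func runMount(mnt *mountlib.MountPoint) error {
	daemonized, err := mnt.Mount()
	if err != nil {
		return err
	}
	if daemonized {
		// The background child owns the mount now; the parent just returns.
		return nil
	}
	return mnt.Wait()
}
```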
@@ -1,3 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js
package mountlib

@@ -1,3 +1,4 @@
//go:build plan9 || js
// +build plan9 js
package mountlib
@@ -1,6 +1,7 @@
// Package ncdu implements a text based user interface for exploring a remote
//+build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package ncdu

@@ -1,6 +1,7 @@
// Build for ncdu for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
package ncdu
@@ -1,3 +1,4 @@
//go:build !noselfupdate
// +build !noselfupdate
package selfupdate
Some files were not shown because too many files have changed in this diff.