Compare commits

95 commits
tcl/master ... v1.53-stab

Commits:

c3dfa7d9a3
1936847548
89b4ccbbfa
3c985a436b
703f6002dd
7de13fc426
c2f6d48d45
9d9999d17b
15f31d3ca4
0ea51f74a1
6cd360233d
687d2d495b
50a107a5f3
2ed2861d09
e2cd449c62
98dbbc78ab
53c4191350
e4ece15e68
fbf46908bf
a96539eeec
86cd5230d7
716019cf7d
c59fe40795
ecd60f2430
d2a5640c3a
8d3acfb38c
200de46249
cee618bc03
db2aa771dc
55bd60019e
c8b11d27e1
4c215cc81e
4df333255a
843d684568
46ea3d93b5
89f2d43f17
cfc5d76fca
0af493f693
51b3ee9a97
6a4b49479d
4b03ee0f99
2f6231f7ac
c0e6f54f01
def7b77d0f
51b18a4a26
7cb76f9054
00ccc93482
f9fe494d93
4a0c266787
f48d0a518c
99ff594773
6c140705e3
e76963a971
43ad7b10a2
f6970c65dd
6012179c67
3ecdd4516f
3b18ba1358
5fbbab58ed
80b93beedf
eb5c47fcfa
c7335e780b
878ebf3658
1c860ef252
a0494479f9
9a9a134188
41ccf01f29
06f3daa64b
d5fe63c0a0
b7f0e776f6
b89f8c05cf
b81dc16484
0e121eeddb
0430163180
09a0dc1600
dd11778ac6
f36cbe5194
82a383588b
8ae4d2cffe
0f895c0697
937dd7fa1f
33869387d1
3ec8e304b3
e62362094e
6a0398211d
e5a53d4c65
59d5767a07
087b5788e2
d944bfd936
d780fcf317
0a9b8eac80
1272a8f9a5
0b40eaedaf
8340ff4fb9
f5abc168ed
105 changed files with 35889 additions and 25961 deletions
.github/workflows/build.yml (vendored, 21 changes)
@@ -46,6 +46,7 @@ jobs:
           go: '1.15.x'
           gotags: cmount
           build_flags: '-include "^windows/amd64" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           racequicktest: true
           deploy: true
@@ -57,6 +58,7 @@ jobs:
           goarch: '386'
           cgo: '1'
           build_flags: '-include "^windows/386" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           deploy: true
 
@@ -107,10 +109,11 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
+          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
 
       - name: Install Libraries on Linux
         shell: bash
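For reference, the mechanism this hunk migrates to is file based: instead of the deprecated `::set-env` workflow command, a step appends `KEY=VALUE` lines to the file that `$GITHUB_ENV` points at. A minimal sketch of a `run:` step body (the variable value is illustrative):

```
# Appending to $GITHUB_ENV exports the variable to all subsequent
# steps in the job; it is not visible within the current step.
echo "GOTAGS=cmount" >> "$GITHUB_ENV"
```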
@@ -125,7 +128,7 @@ jobs:
         shell: bash
         run: |
           brew update
-          brew cask install osxfuse
+          brew install --cask osxfuse
         if: matrix.os == 'macOS-latest'
 
       - name: Install Libraries on Windows
@@ -133,10 +136,10 @@ jobs:
         run: |
           $ProgressPreference = 'SilentlyContinue'
           choco install -y winfsp zip
-          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
           if ($env:GOARCH -eq "386") {
             choco install -y mingw --forcex86 --force
-            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
           }
           # Copy mingw32-make.exe to make.exe so the same command line
           # can be used on Windows as on macOS and Linux
@@ -223,8 +226,8 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOPATH::${{ runner.workspace }}'
-          echo '::add-path::${{ runner.workspace }}/bin'
+          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
+          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
 
       - name: Cross-compile rclone
         run: |
@@ -15,7 +15,7 @@ jobs:
         with:
           fetch-depth: 0
       - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: beta
           imageName: rclone/rclone
@@ -23,7 +23,7 @@ jobs:
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
       - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
           imageName: rclone/rclone
MANUAL.html (generated, 17585 changes)
File diff suppressed because one or more lines are too long
MANUAL.md (generated, 583 changes)
@@ -1,6 +1,6 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Sep 02, 2020
+% Nov 19, 2020
 
 # Rclone syncs your files to cloud storage
 
@@ -146,6 +146,7 @@ WebDAV or S3, that work out of the box.)
 - StackPath
 - SugarSync
 - Tardigrade
+- Tencent Cloud Object Storage (COS)
 - Wasabi
 - WebDAV
 - Yandex Disk
@@ -503,7 +504,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone copy
 
-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.
 
 ## Synopsis
 
@@ -833,7 +834,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.
 
-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -859,6 +860,7 @@ rclone check source:path dest:path [flags]
 ```
       --combined string   Make a combined report of changes to this file
       --differ string     Report all non-matching files to this file
+      --download          Check by downloading rather than with hash.
       --error string      Report all files with errors (hashing or reading) to this file
   -h, --help              help for check
       --match string      Report all matching files to this file
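The restored `--download` flag slots in beside the existing report flags; a hypothetical invocation combining them (paths and report file names are placeholders):

```
# Verify source against dest by downloading and comparing file content
# rather than hashes, writing per-category reports to the named files.
rclone check source:path dest:path --download \
  --differ differ.txt --match match.txt --error errors.txt
```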
@@ -1191,7 +1193,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone cleanup
 
-Clean up the remote if possible
+Clean up the remote if possible.
 
 ## Synopsis
 
@@ -1915,7 +1917,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone copyto
 
-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.
 
 ## Synopsis
 
@@ -2040,7 +2042,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.
 
-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -2434,7 +2436,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone lsf
 
-List directories and objects in remote:path formatted for parsing
+List directories and objects in remote:path formatted for parsing.
 
 ## Synopsis
 
@@ -2744,6 +2746,9 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount
 
+**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.
+
 ## Installing on Windows
 
 To run rclone mount on Windows, you will need to
@@ -2886,9 +2891,6 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
 
-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
-
 ## VFS - Virtual File System
 
 This command uses the VFS layer. This adapts the cloud storage objects
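The chunk-growth behaviour described in this hunk can be exercised directly; a minimal sketch, assuming a configured remote named `remote:` and an empty local mountpoint:

```
# Reads start with 100M chunks; each subsequent chunk doubles in size
# until the 500M limit, matching the 0-100M, 100M-300M, 300M-700M ... pattern.
rclone mount remote: /path/to/local/mount \
  --vfs-read-chunk-size 100M \
  --vfs-read-chunk-size-limit 500M
```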
@@ -3052,6 +3054,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -3289,7 +3296,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
 
 # rclone obscure
 
-Obscure password for use in the rclone config file
+Obscure password for use in the rclone config file.
 
 ## Synopsis
 
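A quick sketch of the command this section documents (the password argument is a placeholder):

```
# Print the obscured form of a password for pasting into rclone.conf.
# Note this is obfuscation, not encryption (see the crypt docs below).
rclone obscure 'my-secret-password'
```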
@@ -3754,6 +3761,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -4056,6 +4068,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -4514,6 +4531,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -5035,6 +5057,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -5502,6 +5529,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.
 
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance
 
 These flags may be used to enable/disable features of the VFS for
@@ -6502,6 +6534,8 @@ This can be useful for tracking down problems with syncs in
 combination with the `-v` flag. See the [Logging section](#logging)
 for more info.
 
+If FILE exists then rclone will append to it.
+
 Note that if you are using the `logrotate` program to manage rclone's
 logs, then you should use the `copytruncate` option as rclone doesn't
 have a signal to rotate logs.
@@ -6996,11 +7030,17 @@ or with `--backup-dir`. See `--backup-dir` for more info.
 
 For example
 
-    rclone sync -i /path/to/local/file remote:current --suffix .bak
+    rclone copy -i /path/to/local/file remote:current --suffix .bak
 
-will sync `/path/to/local` to `remote:current`, but for any files
+will copy `/path/to/local` to `remote:current`, but for any files
 which would have been updated or deleted have .bak added.
 
+If using `rclone sync` with `--suffix` and without `--backup-dir` then
+it is recommended to put a filter rule in excluding the suffix
+otherwise the `sync` will delete the backup files.
+
+    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
+
 ### --suffix-keep-extension ###
 
 When using `--suffix`, setting this causes rclone put the SUFFIX
@@ -8893,6 +8933,8 @@ OR
 "result": "<Raw command line output>"
 }
 
+```
+
 **Authentication is required for this call.**
 
 ### core/gc: Runs a garbage collection. {#core-gc}
@@ -9568,7 +9610,7 @@ This allows you to remove a plugin using it's name
 
 This takes parameters
 
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`
 
 Eg
 
@@ -9582,7 +9624,7 @@ This allows you to remove a plugin using it's name
 
 This takes the following parameters
 
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`
 
 Eg
 
@@ -10527,7 +10569,7 @@ These flags are available for every command.
       --use-json-log            Use json log format.
       --use-mmap                Use mmap allocator (see docs).
       --use-server-modtime      Use server modified time instead of object metadata
-      --user-agent string       Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
+      --user-agent string       Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.3")
   -v, --verbose count           Print lots more stuff (repeat for more)
 ```
 
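As a usage note, the `--user-agent` flag shown above applies to any command; a hypothetical example overriding the default string:

```
# Identify this client to the remote as "myapp/1.0" instead of the
# default "rclone/v1.53.3" user-agent string.
rclone ls remote: --user-agent "myapp/1.0"
```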
@@ -10626,7 +10668,7 @@ and may be set in the config file.
       --drive-auth-owner-only           Only consider files owned by the authenticated user.
       --drive-auth-url string           Auth server URL.
       --drive-chunk-size SizeSuffix     Upload chunk size. Must a power of 2 >= 256k. (default 8M)
-      --drive-client-id string          OAuth Client Id
+      --drive-client-id string          Google Application Client Id
      --drive-client-secret string      OAuth Client Secret
      --drive-disable-http2             Disable drive using http2 (default true)
      --drive-encoding MultiEncoder     This sets the encoding for the backend. (default InvalidUtf8)
@@ -11475,6 +11517,7 @@ The S3 backend can be used with a number of different providers:
 - Minio
 - Scaleway
 - StackPath
+- Tencent Cloud Object Storage (COS)
 - Wasabi
 
 
@@ -11595,7 +11638,7 @@ Choose a number from below, or type in your own value
    / Asia Pacific (Mumbai)
 13 | Needs location constraint ap-south-1.
    \ "ap-south-1"
-   / Asia Patific (Hong Kong) Region
+   / Asia Pacific (Hong Kong) Region
 14 | Needs location constraint ap-east-1.
    \ "ap-east-1"
    / South America (Sao Paulo) Region
@@ -11912,7 +11955,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
 
 ### Standard Options
 
-Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
+Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
 
 #### --s3-provider
 
@@ -11943,6 +11986,8 @@ Choose your S3 provider.
        - Scaleway Object Storage
    - "StackPath"
        - StackPath Object Storage
+   - "TencentCOS"
+       - Tencent Cloud Object Storage (COS)
    - "Wasabi"
        - Wasabi Object Storage
    - "Other"
@@ -11999,12 +12044,12 @@ Region to connect to.
    - "us-east-2"
        - US East (Ohio) Region
        - Needs location constraint us-east-2.
-   - "us-west-2"
-       - US West (Oregon) Region
-       - Needs location constraint us-west-2.
    - "us-west-1"
        - US West (Northern California) Region
        - Needs location constraint us-west-1.
+   - "us-west-2"
+       - US West (Oregon) Region
+       - Needs location constraint us-west-2.
    - "ca-central-1"
        - Canada (Central) Region
        - Needs location constraint ca-central-1.
@@ -12014,9 +12059,15 @@ Region to connect to.
    - "eu-west-2"
        - EU (London) Region
        - Needs location constraint eu-west-2.
+   - "eu-west-3"
+       - EU (Paris) Region
+       - Needs location constraint eu-west-3.
    - "eu-north-1"
        - EU (Stockholm) Region
        - Needs location constraint eu-north-1.
+   - "eu-south-1"
+       - EU (Milan) Region
+       - Needs location constraint eu-south-1.
    - "eu-central-1"
        - EU (Frankfurt) Region
        - Needs location constraint eu-central-1.
@@ -12032,15 +12083,36 @@ Region to connect to.
    - "ap-northeast-2"
        - Asia Pacific (Seoul)
        - Needs location constraint ap-northeast-2.
+   - "ap-northeast-3"
+       - Asia Pacific (Osaka-Local)
+       - Needs location constraint ap-northeast-3.
    - "ap-south-1"
        - Asia Pacific (Mumbai)
        - Needs location constraint ap-south-1.
    - "ap-east-1"
-       - Asia Patific (Hong Kong) Region
+       - Asia Pacific (Hong Kong) Region
        - Needs location constraint ap-east-1.
    - "sa-east-1"
        - South America (Sao Paulo) Region
        - Needs location constraint sa-east-1.
+   - "me-south-1"
+       - Middle East (Bahrain) Region
+       - Needs location constraint me-south-1.
+   - "af-south-1"
+       - Africa (Cape Town) Region
+       - Needs location constraint af-south-1.
+   - "cn-north-1"
+       - China (Beijing) Region
+       - Needs location constraint cn-north-1.
+   - "cn-northwest-1"
+       - China (Ningxia) Region
+       - Needs location constraint cn-northwest-1.
+   - "us-gov-east-1"
+       - AWS GovCloud (US-East) Region
+       - Needs location constraint us-gov-east-1.
+   - "us-gov-west-1"
+       - AWS GovCloud (US) Region
+       - Needs location constraint us-gov-west-1.
 
 #### --s3-region
 
@@ -12296,6 +12368,54 @@ Endpoint for StackPath Object Storage.
 
 #### --s3-endpoint
 
+Endpoint for Tencent COS API.
+
+- Config: endpoint
+- Env Var: RCLONE_S3_ENDPOINT
+- Type: string
+- Default: ""
+- Examples:
+    - "cos.ap-beijing.myqcloud.com"
+        - Beijing Region.
+    - "cos.ap-nanjing.myqcloud.com"
+        - Nanjing Region.
+    - "cos.ap-shanghai.myqcloud.com"
+        - Shanghai Region.
+    - "cos.ap-guangzhou.myqcloud.com"
+        - Guangzhou Region.
+    - "cos.ap-nanjing.myqcloud.com"
+        - Nanjing Region.
+    - "cos.ap-chengdu.myqcloud.com"
+        - Chengdu Region.
+    - "cos.ap-chongqing.myqcloud.com"
+        - Chongqing Region.
+    - "cos.ap-hongkong.myqcloud.com"
+        - Hong Kong (China) Region.
+    - "cos.ap-singapore.myqcloud.com"
+        - Singapore Region.
+    - "cos.ap-mumbai.myqcloud.com"
+        - Mumbai Region.
+    - "cos.ap-seoul.myqcloud.com"
+        - Seoul Region.
+    - "cos.ap-bangkok.myqcloud.com"
+        - Bangkok Region.
+    - "cos.ap-tokyo.myqcloud.com"
+        - Tokyo Region.
+    - "cos.na-siliconvalley.myqcloud.com"
+        - Silicon Valley Region.
+    - "cos.na-ashburn.myqcloud.com"
+        - Virginia Region.
+    - "cos.na-toronto.myqcloud.com"
+        - Toronto Region.
+    - "cos.eu-frankfurt.myqcloud.com"
+        - Frankfurt Region.
+    - "cos.eu-moscow.myqcloud.com"
+        - Moscow Region.
+    - "cos.accelerate.myqcloud.com"
+        - Use Tencent COS Accelerate Endpoint.
+
+#### --s3-endpoint
+
 Endpoint for S3 API.
 Required when using an S3 clone.
 
@@ -12333,18 +12453,22 @@ Used when creating buckets only.
        - Empty for US Region, Northern Virginia or Pacific Northwest.
    - "us-east-2"
        - US East (Ohio) Region.
-   - "us-west-2"
-       - US West (Oregon) Region.
    - "us-west-1"
        - US West (Northern California) Region.
+   - "us-west-2"
+       - US West (Oregon) Region.
    - "ca-central-1"
        - Canada (Central) Region.
    - "eu-west-1"
        - EU (Ireland) Region.
    - "eu-west-2"
        - EU (London) Region.
+   - "eu-west-3"
+       - EU (Paris) Region.
    - "eu-north-1"
        - EU (Stockholm) Region.
+   - "eu-south-1"
+       - EU (Milan) Region.
    - "EU"
        - EU Region.
    - "ap-southeast-1"
@@ -12354,13 +12478,27 @@ Used when creating buckets only.
    - "ap-northeast-1"
        - Asia Pacific (Tokyo) Region.
    - "ap-northeast-2"
-       - Asia Pacific (Seoul)
+       - Asia Pacific (Seoul) Region.
+   - "ap-northeast-3"
+       - Asia Pacific (Osaka-Local) Region.
    - "ap-south-1"
-       - Asia Pacific (Mumbai)
+       - Asia Pacific (Mumbai) Region.
    - "ap-east-1"
-       - Asia Pacific (Hong Kong)
+       - Asia Pacific (Hong Kong) Region.
    - "sa-east-1"
        - South America (Sao Paulo) Region.
+   - "me-south-1"
+       - Middle East (Bahrain) Region.
+   - "af-south-1"
+       - Africa (Cape Town) Region.
+   - "cn-north-1"
+       - China (Beijing) Region
+   - "cn-northwest-1"
+       - China (Ningxia) Region.
+   - "us-gov-east-1"
+       - AWS GovCloud (US-East) Region.
+   - "us-gov-west-1"
+       - AWS GovCloud (US) Region.
 
 #### --s3-location-constraint
 
@@ -12463,6 +12601,8 @@ doesn't copy the ACL from the source but rather writes a fresh one.
 - Type: string
 - Default: ""
 - Examples:
+    - "default"
+        - Owner gets Full_CONTROL. No one else has access rights (default).
     - "private"
         - Owner gets FULL_CONTROL. No one else has access rights (default).
     - "public-read"
@@ -12563,6 +12703,24 @@ The storage class to use when storing new objects in OSS.
 
 #### --s3-storage-class
 
+The storage class to use when storing new objects in Tencent COS.
+
+- Config: storage_class
+- Env Var: RCLONE_S3_STORAGE_CLASS
+- Type: string
+- Default: ""
+- Examples:
+    - ""
+        - Default
+    - "STANDARD"
+        - Standard storage class
+    - "ARCHIVE"
+        - Archive storage mode.
+    - "STANDARD_IA"
+        - Infrequent access storage mode.
+
+#### --s3-storage-class
+
 The storage class to use when storing new objects in S3.
 
 - Config: storage_class
@@ -12579,7 +12737,7 @@ The storage class to use when storing new objects in S3.
 
 ### Advanced Options
 
-Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
+Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
 
 #### --s3-bucket-acl
 
@@ -12800,7 +12958,7 @@ if false then rclone will use virtual path style. See [the AWS S3
 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
 for more info.
 
-Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
 false - rclone will do this automatically based on the provider
 setting.
 
@@ -13669,6 +13827,138 @@ d) Delete this remote
 y/e/d> y
 ```
 
+### Tencent COS {#tencent-cos}
+
+[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, massive, convenient, low-delay and low-cost.
+
+To configure access to Tencent COS, follow the steps below:
+
+1. Run `rclone config` and select `n` for a new remote.
+
+```
+rclone config
+No remotes found - make a new one
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+```
+
+2. Give the name of the configuration. For example, name it 'cos'.
+
+```
+name> cos
+```
+
+3. Select `s3` storage.
+
+```
+Choose a number from below, or type in your own value
+1 / 1Fichier
+   \ "fichier"
+2 / Alias for an existing remote
+   \ "alias"
+3 / Amazon Drive
+   \ "amazon cloud drive"
+4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
+   \ "s3"
+[snip]
+Storage> s3
+```
+
+4. Select `TencentCOS` provider.
+```
+Choose a number from below, or type in your own value
+1 / Amazon Web Services (AWS) S3
+   \ "AWS"
+[snip]
+11 / Tencent Cloud Object Storage (COS)
+   \ "TencentCOS"
+[snip]
+provider> TencentCOS
+```
+
+5. Enter your SecretId and SecretKey of Tencent Cloud.
+
+```
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+Only applies if access_key_id and secret_access_key is blank.
+Enter a boolean value (true or false). Press Enter for the default ("false").
+Choose a number from below, or type in your own value
+1 / Enter AWS credentials in the next step
+   \ "false"
+2 / Get AWS credentials from the environment (env vars or IAM)
+   \ "true"
+env_auth> 1
+AWS Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Enter a string value. Press Enter for the default ("").
+access_key_id> AKIDxxxxxxxxxx
+AWS Secret Access Key (password)
+Leave blank for anonymous access or runtime credentials.
+Enter a string value. Press Enter for the default ("").
+secret_access_key> xxxxxxxxxxx
+```
+
+6. Select endpoint for Tencent COS. This is the standard endpoint for different region.
+
+```
+ 1 / Beijing Region.
+   \ "cos.ap-beijing.myqcloud.com"
+ 2 / Nanjing Region.
+   \ "cos.ap-nanjing.myqcloud.com"
+ 3 / Shanghai Region.
+   \ "cos.ap-shanghai.myqcloud.com"
+ 4 / Guangzhou Region.
+   \ "cos.ap-guangzhou.myqcloud.com"
+[snip]
+endpoint> 4
+```
+
+7. Choose acl and storage class.
+
+```
+Note that this ACL is applied when server side copying objects as S3
+doesn't copy the ACL from the source but rather writes a fresh one.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+1 / Owner gets Full_CONTROL. No one else has access rights (default).
+   \ "default"
+[snip]
+acl> 1
+The storage class to use when storing new objects in Tencent COS.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+1 / Default
+   \ ""
+[snip]
+storage_class> 1
+Edit advanced config? (y/n)
+y) Yes
+n) No (default)
+y/n> n
+Remote config
+--------------------
+[cos]
+type = s3
+provider = TencentCOS
+env_auth = false
+access_key_id = xxx
+secret_access_key = xxx
+endpoint = cos.ap-guangzhou.myqcloud.com
+acl = default
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Current remotes:
+
+Name                 Type
+====                 ====
+cos                  s3
+```
+
 ### Netease NOS ###
 
 For Netease NOS configure as per the configurator `rclone config`
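The interactive walkthrough added above can also be expressed non-interactively with `rclone config create`; a minimal sketch using the same values (the credentials are placeholders):

```
# One-shot equivalent of the interactive Tencent COS setup above.
rclone config create cos s3 \
  provider TencentCOS \
  access_key_id AKIDxxxxxxxxxx \
  secret_access_key xxxxxxxxxxx \
  endpoint cos.ap-guangzhou.myqcloud.com \
  acl default
```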
@@ -14587,7 +14877,8 @@ Note that Box is case insensitive so you can't have a file called
 "Hello.doc" and one called "hello.doc".
 
 Box file names can't have the `\` character in. rclone maps this to
-and from an identical looking unicode equivalent `＼`.
+and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
+Reverse Solidus).
 
 Box only supports filenames up to 255 characters in length.
 
@@ -15869,23 +16160,26 @@ See: the [encoding section in the overview](https://rclone.org/overview/#encodin
 Crypt
 ----------------------------------------
 
-The `crypt` remote encrypts and decrypts another remote.
+Rclone `crypt` remotes encrypt and decrypt other remotes.
 
-To use it first set up the underlying remote following the config
-instructions for that remote. You can also use a local pathname
-instead of a remote which will encrypt and decrypt from that directory
-which might be useful for encrypting onto a USB stick for example.
+To use `crypt`, first set up the underlying remote. Follow the `rclone
+config` instructions for that remote.
 
-First check your chosen remote is working - we'll call it
-`remote:path` in these docs. Note that anything inside `remote:path`
-will be encrypted and anything outside won't. This means that if you
-are using a bucket based remote (eg S3, B2, swift) then you should
-probably put the bucket in the remote `s3:bucket`. If you just use
-`s3:` then rclone will make encrypted bucket names too (if using file
-name encryption) which may or may not be what you want.
+`crypt` applied to a local pathname instead of a remote will
+encrypt and decrypt that directory, and can be used to encrypt USB
+removable drives.
 
-Now configure `crypt` using `rclone config`. We will call this one
-`secret` to differentiate it from the `remote`.
+Before configuring the crypt remote, check the underlying remote is
+working. In this example the underlying remote is called `remote:path`.
+Anything inside `remote:path` will be encrypted and anything outside
+will not. In the case of an S3 based underlying remote (eg Amazon S3,
+B2, Swift) it is generally advisable to define a crypt remote in the
+underlying remote `s3:bucket`. If `s3:` alone is specified alongside
+file name encryption, rclone will encrypt the bucket name.
+
+Configure `crypt` using `rclone config`. In this example the `crypt`
+remote is called `secret`, to differentiate it from the underlying
+`remote`.
 
 ```
 No remotes found - make a new one
@@ -15959,49 +16253,42 @@ d) Delete this remote
 y/e/d> y
 ```
 
-**Important** The password is stored in the config file is lightly
-obscured so it isn't immediately obvious what it is. It is in no way
-secure unless you use config file encryption.
+**Important** The crypt password stored in `rclone.conf` is lightly
+obscured. That only protects it from cursory inspection. It is not
+secure unless encryption of `rclone.conf` is specified.
 
-A long passphrase is recommended, or you can use a random one.
+A long passphrase is recommended, or `rclone config` can generate a
+random one.
 
-The obscured password is created by using AES-CTR with a static key, with
-the salt stored verbatim at the beginning of the obscured password. This
-static key is shared by between all versions of rclone.
+The obscured password is created using AES-CTR with a static key. The
+salt is stored verbatim at the beginning of the obscured password. This
+static key is shared between all versions of rclone.
 
 If you reconfigure rclone with the same passwords/passphrases
 elsewhere it will be compatible, but the obscured version will be different
 due to the different salt.
 
-Note that rclone does not encrypt
+Rclone does not encrypt
 
 * file length - this can be calculated within 16 bytes
 * modification time - used for syncing
 
 ## Specifying the remote ##
 
-In normal use, make sure the remote has a `:` in. If you specify the
-remote without a `:` then rclone will use a local directory of that
-name. So if you use a remote of `/path/to/secret/files` then rclone
-will encrypt stuff to that directory. If you use a remote of `name`
-then rclone will put files in a directory called `name` in the current
-directory.
+In normal use, ensure the remote has a `:` in. If specified without,
+rclone uses a local directory of that name. For example if a remote
+`/path/to/secret/files` is specified, rclone encrypts content to that
+directory. If a remote `name` is specified, rclone targets a directory
+`name` in the current directory.
 
-If you specify the remote as `remote:path/to/dir` then rclone will
-store encrypted files in `path/to/dir` on the remote. If you are using
-file name encryption, then when you save files to
-`secret:subdir/subfile` this will store them in the unencrypted path
-`path/to/dir` but the `subdir/subpath` bit will be encrypted.
+If remote `remote:path/to/dir` is specified, rclone stores encrypted
+files in `path/to/dir` on the remote. With file name encryption, files
+saved to `secret:subdir/subfile` are stored in the unencrypted path
+`path/to/dir` but the `subdir/subpath` element is encrypted.
 
-Note that unless you want encrypted bucket names (which are difficult
-to manage because you won't know what directory they represent in web
-interfaces etc), you should probably specify a bucket, eg
-`remote:secretbucket` when using bucket based remotes such as S3,
-Swift, Hubic, B2, GCS.
-
 ## Example ##
 
-To test I made a little directory of files using "standard" file name
+Create the following file structure using "standard" file name
 encryption.
 
 ```
@@ -16015,7 +16302,7 @@ plaintext/
     └── file4.txt
 ```
 
-Copy these to the remote and list them back
+Copy these to the remote, and list them
 
 ```
 $ rclone -q copy plaintext secret:
@@ -16027,7 +16314,7 @@ $ rclone -q ls secret:
         9 subdir/file3.txt
 ```
 
-Now see what that looked like when encrypted
+The crypt remote looks like
 
 ```
 $ rclone -q ls remote:path
@@ -16038,7 +16325,7 @@ $ rclone -q ls remote:path
        56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
 ```
 
-Note that this retains the directory structure which means you can do this
+The directory structure is preserved
 
 ```
 $ rclone -q ls secret:subdir
@@ -16047,9 +16334,9 @@ $ rclone -q ls secret:subdir
        10 subsubdir/file4.txt
 ```
 
-If don't use file name encryption then the remote will look like this
-- note the `.bin` extensions added to prevent the cloud provider
-attempting to interpret the data.
+Without file name encryption `.bin` extensions are added to underlying
+names. This prevents the cloud provider attempting to interpret file
+content.
 
 ```
 $ rclone -q ls remote:path
@@ -16062,8 +16349,6 @@ $ rclone -q ls remote:path
 
 ### File name encryption modes ###
 
-Here are some of the features of the file name encryption modes
-
 Off
 
 * doesn't hide file names or directory structure
@@ -16082,17 +16367,19 @@ Standard
 Obfuscation
 
 This is a simple "rotate" of the filename, with each file having a rot
-distance based on the filename. We store the distance at the beginning
-of the filename. So a file called "hello" may become "53.jgnnq".
+distance based on the filename. Rclone stores the distance at the
+beginning of the filename. A file called "hello" may become "53.jgnnq".
 
-This is not a strong encryption of filenames, but it may stop automated
-scanning tools from picking up on filename patterns. As such it's an
-intermediate between "off" and "standard". The advantage is that it
-allows for longer path segment names.
+Obfuscation is not a strong encryption of filenames, but hinders
+automated scanning tools picking up on filename patterns. It is an
+intermediate between "off" and "standard" which allows for longer path
+segment names.
 
 There is a possibility with some unicode based filenames that the
 obfuscation is weak and may map lower case characters to upper case
-equivalents. You can not rely on this for strong protection.
+equivalents.
+
+Obfuscation cannot be relied upon for strong protection.
 
 * file names very lightly obfuscated
 * file names can be longer than standard encryption
@@ -16100,13 +16387,14 @@ equivalents. You can not rely on this for strong protection.
 * directory structure visible
 * identical files names will have identical uploaded names
 
-Cloud storage systems have various limits on file name length and
-total path length which you are more likely to hit using "Standard"
-file name encryption. If you keep your file names to below 156
-characters in length then you should be OK on all providers.
+Cloud storage systems have limits on file name length and
+total path length which rclone is more likely to breach using
+"Standard" file name encryption. Where file names are less than 156
+characters in length issues should not be encountered, irrespective of
+cloud storage provider.
 
-There may be an even more secure file name encryption mode in the
-future which will address the long file name problem.
+An alternative, future rclone file name encryption mode may tolerate
+backend provider path length limits.
 
 ### Directory name encryption ###
 Crypt offers the option of encrypting dir names or leaving them intact.
@@ -16132,10 +16420,10 @@ Example:
 Crypt stores modification times using the underlying remote so support
 depends on that.
 
 Hashes are not stored for crypt. However the data integrity is
 protected by an extremely strong crypto authenticator.
 
-Note that you should use the `rclone cryptcheck` command to check the
+Use the `rclone cryptcheck` command to check the
 integrity of a crypted remote instead of `rclone check` which can't
 check the checksums properly.
 
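Following the advice in this hunk, integrity of a crypted remote is checked against its plaintext source with `cryptcheck` rather than `check`; for example, with the `secret:` remote used throughout these docs:

```
# Checks the plaintext files against the crypted remote, verifying
# checksums through the encryption layer rather than raw stored hashes.
rclone cryptcheck /path/to/plaintext secret:
```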
@@ -17997,8 +18285,10 @@ Here are the standard options specific to drive (Google Drive).
 
 #### --drive-client-id
 
-OAuth Client Id
-Leave blank normally.
+Google Application Client Id
+Setting your own is recommended.
+See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
+If you leave this blank, it will use an internal key which is low performance.
 
 - Config: client_id
 - Env Var: RCLONE_DRIVE_CLIENT_ID
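The recommendation to set your own client ID can be satisfied in the config file or on the command line; a sketch with placeholder credentials and an assumed remote name `gdrive:`:

```
# Use your own Google application credentials instead of rclone's
# shared internal key (which is rate-limited and low performance).
rclone ls gdrive: \
  --drive-client-id YOUR_CLIENT_ID.apps.googleusercontent.com \
  --drive-client-secret YOUR_CLIENT_SECRET
```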
@@ -19688,8 +19978,13 @@ flag.
 Note that Jottacloud requires the MD5 hash before upload so if the
 source does not have an MD5 checksum then the file will be cached
 temporarily on disk (wherever the `TMPDIR` environment variable points
 to) before it is uploaded. Small files will be cached in memory - see
 the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
+When uploading from local disk the source checksum is always available,
+so this does not apply. Starting with rclone version 1.52 the same is
+true for crypted remotes (in older versions the crypt backend would not
+calculate hashes for uploads from local disk, so the Jottacloud
+backend had to do it as described above).
 
 #### Restricted filename characters
 
@@ -25432,6 +25727,100 @@ Options:
 
 # Changelog
 
+## v1.53.3 - 2020-11-19
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)
+
+* Bug Fixes
+    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
+        * Passwords you have generated with `rclone config` may be insecure
+        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
+    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
+* VFS
+    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
+* Sharefile
+    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)
+
+## v1.53.2 - 2020-10-26
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
+
+* Bug Fixes
+    * accounting
+        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
+        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
+    * operations
+        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
+        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
+    * build
+        * Work around GitHub actions brew problem (Nick Craig-Wood)
+        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
+* Mount
+    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
+* VFS
+    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
+    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
+    * Fix a race condition in retryFailedResets (Leo Luan)
+    * Fix missed concurrency control between some item operations and reset (Leo Luan)
+    * Add exponential backoff during ENOSPC retries (Leo Luan)
+    * Add a missed update of used cache space (Leo Luan)
+    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
+* Local
+    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
+* Chunker
+    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
+    * Fix upload over crypt (Ivan Andreev)
+* Fichier
+    * Increase maximum file size from 100GB to 300GB (gyutw)
+* Jottacloud
+    * Remove clientSecret from config when upgrading to token based authentication (buengese)
+    * Avoid double url escaping of device/mountpoint (albertony)
+    * Remove DirMove workaround as it's not required anymore - also (buengese)
+* Mailru
+    * Fix uploads after recent changes on server (Ivan Andreev)
+    * Fix range requests after june changes on server (Ivan Andreev)
+    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
+* Onedrive
+    * Fix disk usage for sharepoint (Nick Craig-Wood)
+* S3
+    * Add missing regions for AWS (Anagh Kumar Baranwal)
+* Seafile
+    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
+* SFTP
+    * Always convert the checksum to lower case (buengese)
+* Union
+    * Create root directories if none exist (Nick Craig-Wood)
+
+## v1.53.1 - 2020-09-13
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
+
+* Bug Fixes
+    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
+    * check
+        * Add back missing --download flag (Nick Craig-Wood)
+        * Fix docs (Nick Craig-Wood)
+    * docs
+        * Note --log-file does append (Nick Craig-Wood)
+        * Add full stops for consistency in rclone --help (edwardxml)
|
||||||
|
* Add Tencent COS to s3 provider list (wjielai)
|
||||||
|
* Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
|
||||||
|
* jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
|
||||||
|
* Fix formatting of rc docs page (Nick Craig-Wood)
|
||||||
|
* build
|
||||||
|
* Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
|
||||||
|
* Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
|
||||||
|
* Fix architecture name in ARMv7 build (Nick Craig-Wood)
|
||||||
|
* VFS
|
||||||
|
* Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
|
||||||
|
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
|
||||||
|
* Local
|
||||||
|
* Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
|
||||||
|
* Drive
|
||||||
|
* Re-adds special oauth help text (Tim Gallant)
|
||||||
|
* Opendrive
|
||||||
|
* Do not retry 400 errors (Evan Harris)
|
||||||
|
|
||||||
## v1.53.0 - 2020-09-02
|
## v1.53.0 - 2020-09-02
|
||||||
|
|
||||||
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
|
[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
|
||||||
|
|
17137
MANUAL.txt
generated
File diff suppressed because it is too large
34
Makefile

@@ -8,7 +8,8 @@ VERSION := $(shell cat VERSION)
 # Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 # Next version
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
 # If we are working on a release, override branch to master
 ifdef RELEASE_TAG
 BRANCH := master
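A quick check of what the two awk expressions compute, run in a plain shell (where the Makefile's `$$` becomes `$`):

```
$ echo v1.53.0 | awk -F. -v OFS=. '{print $1,$2+1,0}'    # NEXT_VERSION
v1.54.0
$ echo v1.53.0 | awk -F. -v OFS=. '{print $1,$2,$3+1}'   # NEXT_PATCH_VERSION
v1.53.1
```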
@@ -45,13 +46,13 @@ endif
 .PHONY: rclone test_all vars version

 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 	mkdir -p `go env GOPATH`/bin/
 	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
 	@echo SHELL="'$(SHELL)'"
@@ -164,6 +165,11 @@ validate_website: website
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

+vendorball:
+	go mod vendor
+	tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
+	rm -rf vendor
+
 sign_upload:
 	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
 	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -182,10 +188,10 @@ upload_github:
 	./bin/upload-github $(TAG)

 cross: doc
-	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -193,23 +199,23 @@ log_since_last_release:
 	git log $(LAST_TAG)..

 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds

 ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)
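The effect of the new `$(or ...)` guard is that the latest-build copies are skipped when either variable is non-empty, not just when `BRANCH_PATH` is set. Roughly (values are illustrative):

```
# neither BRANCH_PATH nor RELEASE_TAG set: also refreshes testbuilds-latest
$ make ci_upload

# a release build sets RELEASE_TAG, so the testbuilds-latest copy is skipped
$ make ci_upload RELEASE_TAG=v1.53.4
```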
@@ -239,7 +245,15 @@ startdev:
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
 	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
 	echo "$(NEXT_VERSION)" > VERSION
-	git commit -m "Start $(VERSION)-DEV development" fs/version.go
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+
+startstable:
+	@echo "Version is $(VERSION)"
+	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_PATCH_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
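Assuming `VERSION` currently reads v1.53.4, a run of the new target would look roughly like this:

```
$ make startstable
Version is v1.53.4
Next stable version is v1.53.5
# fs/version.go is rewritten to v1.53.5-DEV, VERSION and the docs version
# partial to v1.53.5, and all three files are committed together
```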
README.md

@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
57
RELEASE.md

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases

 ## Making a release

-* git checkout master
+* git checkout master # see below for stable branch
 * git pull
 * git status - make sure everything is checked in
 * Check GitHub actions build for master is Green
@@ -25,12 +25,13 @@ This file describes how to make the various kinds of releases
 * # Wait for the GitHub builds to complete then...
 * make fetch_binaries
 * make tarball
+* make vendorball
 * make sign_upload
 * make check_sign
 * make upload
 * make upload_website
 * make upload_github
-* make startdev
+* make startdev # make startstable for stable branch
 * # announce with forum post, twitter post, patreon post

 Early in the next release cycle update the dependencies
@@ -41,62 +42,34 @@ Early in the next release cycle update the dependencies
 * git add new files
 * git commit -a -v

-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-* GO111MODULE=on go mod tidy
-
 ## Making a point release

 If rclone needs a point release due to some horrendous bug:

-First make the release branch. If this is a second point release then
-this will be done already.
+Set vars

 * BASE_TAG=v1.XX # eg v1.52
 * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
 * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

+First make the release branch. If this is a second point release then
+this will be done already.
+
 * git branch ${BASE_TAG} ${BASE_TAG}-stable
+* git co ${BASE_TAG}-stable
+* make startstable

 Now

-* FIXME this is now broken with new semver layout - needs fixing
-* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
 * git co ${BASE_TAG}-stable
 * git cherry-pick any fixes
-* Test (see above)
-* make NEXT_VERSION=${NEW_TAG} tag
-* edit docs/content/changelog.md
-* make TAG=${NEW_TAG} doc
-* git commit -a -v -m "Version ${NEW_TAG}"
-* git tag -d ${NEW_TAG}
-* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-* git push --tags -u origin ${BASE_TAG}-stable
-* Wait for builds to complete
-* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
-* make TAG=${NEW_TAG} tarball
-* make TAG=${NEW_TAG} sign_upload
-* make TAG=${NEW_TAG} check_sign
-* make TAG=${NEW_TAG} upload
-* make TAG=${NEW_TAG} upload_website
-* make TAG=${NEW_TAG} upload_github
-* NB this overwrites the current beta so we need to do this
+* Do the steps as above
+* make startstable
 * git co master
-* make VERSION=${NEW_TAG} startdev
-* # cherry pick the changes to the changelog and VERSION
-* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
-* git commit --amend
+* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
+* git checkout ${BASE_TAG}-stable docs/content/changelog.md
+* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
 * git push
-* Announce!

 ## Making a manual build of docker
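Filled in for the point release this branch is building (v1.53.4 per the VERSION file below), the variable setup and branch steps come out as:

```
$ BASE_TAG=v1.53
$ NEW_TAG=${BASE_TAG}.4
$ echo $BASE_TAG $NEW_TAG
v1.53 v1.53.4

# only needed for the first point release off this branch
$ git branch ${BASE_TAG} ${BASE_TAG}-stable
$ git checkout ${BASE_TAG}-stable
$ make startstable
```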
2
VERSION

@@ -1 +1 @@
-v1.53.0
+v1.53.4
backend/chunker/chunker.go

@@ -121,6 +121,8 @@ const maxTransactionProbes = 100
 // standard chunker errors
 var (
 	ErrChunkOverflow = errors.New("chunk number overflow")
+	ErrMetaTooBig    = errors.New("metadata is too big")
+	ErrMetaUnknown   = errors.New("unknown metadata, please upgrade rclone")
 )

 // variants of baseMove's parameter delMode
@@ -296,6 +298,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		ServerSideAcrossConfigs: true,
 	}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)

+	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
+
 	return f, err
 }
@@ -691,43 +695,47 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 		switch entry := dirOrObject.(type) {
 		case fs.Object:
 			remote := entry.Remote()
-			if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
-				if xactID != "" {
-					if revealHidden {
-						fs.Infof(f, "ignore temporary chunk %q", remote)
-					}
-					break
-				}
-				if ctrlType != "" {
-					if revealHidden {
-						fs.Infof(f, "ignore control chunk %q", remote)
-					}
-					break
-				}
-				mainObject := byRemote[mainRemote]
-				if mainObject == nil && f.useMeta {
-					fs.Debugf(f, "skip chunk %q without meta object", remote)
-					break
-				}
-				if mainObject == nil {
-					// useMeta is false - create chunked object without metadata
-					mainObject = f.newObject(mainRemote, nil, nil)
-					byRemote[mainRemote] = mainObject
-					if !badEntry[mainRemote] {
-						tempEntries = append(tempEntries, mainObject)
-					}
-				}
-				if err := mainObject.addChunk(entry, chunkNo); err != nil {
-					if f.opt.FailHard {
-						return nil, err
-					}
-					badEntry[mainRemote] = true
-				}
-				break
-			}
-			object := f.newObject("", entry, nil)
-			byRemote[remote] = object
-			tempEntries = append(tempEntries, object)
+			mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
+			if mainRemote == "" {
+				// this is meta object or standalone file
+				object := f.newObject("", entry, nil)
+				byRemote[remote] = object
+				tempEntries = append(tempEntries, object)
+				break
+			}
+			// this is some kind of chunk
+			// metobject should have been created above if present
+			isSpecial := xactID != "" || ctrlType != ""
+			mainObject := byRemote[mainRemote]
+			if mainObject == nil && f.useMeta && !isSpecial {
+				fs.Debugf(f, "skip orphan data chunk %q", remote)
+				break
+			}
+			if mainObject == nil && !f.useMeta {
+				// this is the "nometa" case
+				// create dummy chunked object without metadata
+				mainObject = f.newObject(mainRemote, nil, nil)
+				byRemote[mainRemote] = mainObject
+				if !badEntry[mainRemote] {
+					tempEntries = append(tempEntries, mainObject)
+				}
+			}
+			if isSpecial {
+				if revealHidden {
+					fs.Infof(f, "ignore non-data chunk %q", remote)
+				}
+				// need to read metadata to ensure actual object type
+				if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
+					mainObject.unsure = true
+				}
+				break
+			}
+			if err := mainObject.addChunk(entry, chunkNo); err != nil {
+				if f.opt.FailHard {
+					return nil, err
+				}
+				badEntry[mainRemote] = true
+			}
 		case fs.Directory:
 			isSubdir[entry.Remote()] = true
 			wrapDir := fs.NewDirCopy(ctx, entry)
@@ -782,6 +790,13 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 // but opening even a small file can be slow on some backends.
 //
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.scanObject(ctx, remote, false)
+}
+
+// scanObject is like NewObject with optional quick scan mode.
+// The quick mode avoids directory requests other than `List`,
+// ignores non-chunked objects and skips chunk size checks.
+func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
 	if err := f.forbidChunk(false, remote); err != nil {
 		return nil, errors.Wrap(err, "can't access")
 	}
@@ -842,8 +857,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 			continue // bypass regexp to save cpu
 		}
 		mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
-		if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
-			continue // skip non-conforming, temporary and control chunks
+		if mainRemote == "" || mainRemote != remote {
+			continue // skip non-conforming chunks
+		}
+		if ctrlType != "" || xactID != "" {
+			if f.useMeta {
+				// temporary/control chunk calls for lazy metadata read
+				o.unsure = true
+			}
+			continue
 		}
 		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
 		if err := o.addChunk(entry, chunkNo); err != nil {
@@ -853,7 +875,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 		// Scanning hasn't found data chunks with conforming names.
-		if f.useMeta {
+		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.
 			return nil, fs.ErrorObjectNotFound
 		}
@@ -876,8 +898,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	// file without metadata. Validate it and update the total data size.
 	// As an optimization, skip metadata reading here - we will call
 	// readMetadata lazily when needed (reading can be expensive).
-	if err := o.validate(); err != nil {
-		return nil, err
+	if !quickScan {
+		if err := o.validate(); err != nil {
+			return nil, err
+		}
 	}
 	return o, nil
 }
@@ -886,13 +910,24 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	if o.isFull {
 		return nil
 	}
-	if !o.isComposite() || !o.f.useMeta {
+	if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
 		o.isFull = true
 		return nil
 	}

 	// validate metadata
 	metaObject := o.main
+	if metaObject.Size() > maxMetadataSize {
+		if o.unsure {
+			// this is not metadata but a foreign object
+			o.unsure = false
+			o.chunks = nil  // make isComposite return false
+			o.isFull = true // cache results
+			return nil
+		}
+		return ErrMetaTooBig
+	}
+
 	reader, err := metaObject.Open(ctx)
 	if err != nil {
 		return err
@@ -905,8 +940,22 @@ func (o *Object) readMetadata(ctx context.Context) error {

 	switch o.f.opt.MetaFormat {
 	case "simplejson":
-		metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
-		if err != nil {
+		metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
+		if o.unsure {
+			o.unsure = false
+			if !madeByChunker {
+				// this is not metadata but a foreign object
+				o.chunks = nil  // make isComposite return false
+				o.isFull = true // cache results
+				return nil
+			}
+		}
+		switch err {
+		case nil:
+			// fall thru
+		case ErrMetaTooBig, ErrMetaUnknown:
+			return err // return these errors unwrapped for unit tests
+		default:
 			return errors.Wrap(err, "invalid metadata")
 		}
 		if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
@@ -921,7 +970,27 @@ func (o *Object) readMetadata(ctx context.Context) error {
 }

 // put implements Put, PutStream, PutUnchecked, Update
-func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
+func (f *Fs) put(
+	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
+	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
+
+	if err := f.forbidChunk(src, remote); err != nil {
+		return nil, errors.Wrap(err, action+" refused")
+	}
+	if target == nil {
+		// Get target object with a quick directory scan
+		if obj, err := f.scanObject(ctx, remote, true); err == nil {
+			target = obj
+		}
+	}
+	if target != nil {
+		obj := target.(*Object)
+		if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
+			// refuse to update a file of unsupported format
+			return nil, errors.Wrap(err, "refusing to "+action)
+		}
+	}
+
 	c := f.newChunkingReader(src)
 	wrapIn := c.wrapStream(ctx, in, src)
@@ -958,6 +1027,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 		}
 		info := f.wrapInfo(src, chunkRemote, size)

+		// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
+		c.chunkLimit = c.chunkSize
 		// TODO: handle range/limit options
 		chunk, errChunk := basePut(ctx, wrapIn, info, options...)
 		if errChunk != nil {
@@ -1009,8 +1080,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 	// Check for input that looks like valid metadata
 	needMeta := len(c.chunks) > 1
 	if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
-		_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
-		needMeta = err == nil
+		_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
+		needMeta = madeByChunker
 	}

 	// Finalize small object as non-chunked.
@@ -1166,10 +1237,14 @@ func (c *chunkingReader) updateHashes() {
 func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
 	if c.chunkLimit <= 0 {
 		// Chunk complete - switch to next one.
+		// Note #1:
 		// We might not get here because some remotes (eg. box multi-uploader)
 		// read the specified size exactly and skip the concluding EOF Read.
 		// Then a check in the put loop will kick in.
-		c.chunkLimit = c.chunkSize
+		// Note #2:
+		// The crypt backend after receiving EOF here will call Read again
+		// and we must insist on returning EOF, so we postpone refilling
+		// chunkLimit to the main loop.
 		return 0, io.EOF
 	}
 	if int64(len(buf)) > c.chunkLimit {
@@ -1253,29 +1328,16 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if err := f.forbidChunk(src, src.Remote()); err != nil {
-		return nil, errors.Wrap(err, "refusing to put")
-	}
-	return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
+	return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
 func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if err := f.forbidChunk(src, src.Remote()); err != nil {
-		return nil, errors.Wrap(err, "refusing to upload")
-	}
-	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
+	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
 }

 // Update in to the object with the modTime given of the given size
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.f.forbidChunk(o, o.Remote()); err != nil {
-		return errors.Wrap(err, "update refused")
-	}
-	if err := o.readMetadata(ctx); err != nil {
-		// refuse to update a file of unsupported format
-		return errors.Wrap(err, "refusing to update")
-	}
 	basePut := o.f.base.Put
 	if src.Size() < 0 {
 		basePut = o.f.base.Features().PutStream
|
||||||
return errors.New("wrapped file system does not support streaming uploads")
|
return errors.New("wrapped file system does not support streaming uploads")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
|
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
*o = *oNew.(*Object)
|
*o = *oNew.(*Object)
|
||||||
}
|
}
|
||||||
|
@@ -1397,7 +1459,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		// to corrupt file in hard mode. Hence, refuse to Remove, too.
 		return errors.Wrap(err, "refuse to corrupt")
 	}
-	if err := o.readMetadata(ctx); err != nil {
+	if err := o.readMetadata(ctx); err == ErrMetaUnknown {
 		// Proceed but warn user that unexpected things can happen.
 		fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
 	}
@@ -1425,6 +1487,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
 	if err := f.forbidChunk(o, remote); err != nil {
 		return nil, errors.Wrapf(err, "can't %s", opName)
 	}
+	if err := o.readMetadata(ctx); err != nil {
+		// Refuse to copy/move composite files with invalid or future
+		// metadata format which might involve unsupported chunk types.
+		return nil, errors.Wrapf(err, "can't %s this file", opName)
+	}
 	if !o.isComposite() {
 		fs.Debugf(o, "%s non-chunked object...", opName)
 		oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
|
||||||
}
|
}
|
||||||
return f.newObject("", oResult, nil), nil
|
return f.newObject("", oResult, nil), nil
|
||||||
}
|
}
|
||||||
if err := o.readMetadata(ctx); err != nil {
|
|
||||||
// Refuse to copy/move composite files with invalid or future
|
|
||||||
// metadata format which might involve unsupported chunk types.
|
|
||||||
return nil, errors.Wrapf(err, "can't %s this file", opName)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
|
fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
|
||||||
mainRemote := o.remote
|
mainRemote := o.remote
|
||||||
|
@@ -1528,6 +1590,10 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
 		return
 	}

+	if obj.unsure {
+		// ensure object is composite if need to re-read metadata
+		_ = obj.readMetadata(ctx)
+	}
 	requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
 	if !requireMetaHash && !f.hashAll {
 		ok = true // hash is not required for metadata
@@ -1711,6 +1777,7 @@ type Object struct {
 	chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
 	size   int64       // cached total size of chunks in a composite file or -1 for non-chunked files
 	isFull bool        // true if metadata has been read
+	unsure bool        // true if need to read metadata to detect object type
 	md5    string
 	sha1   string
 	f      *Fs
@@ -1861,15 +1928,16 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 // on the level of wrapped remote but chunker is unaware of that.
 //
 func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
+	if err := o.readMetadata(ctx); err != nil {
+		return "", err // valid metadata is required to get hash, abort
+	}
 	if !o.isComposite() {
 		// First, chain to the wrapped non-chunked file if possible.
 		if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
 			return value, nil
 		}
 	}
-	if err := o.readMetadata(ctx); err != nil {
-		return "", err // valid metadata is required to get hash, abort
-	}
 	// Try hash from metadata if the file is composite or if wrapped remote fails.
 	switch hashType {
 	case hash.MD5:
@@ -1894,13 +1962,13 @@ func (o *Object) UnWrap() fs.Object {

 // Open opens the file for read. Call Close() on the returned io.ReadCloser
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
-	if !o.isComposite() {
-		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
-	}
 	if err := o.readMetadata(ctx); err != nil {
 		// refuse to open unsupported format
 		return nil, errors.Wrap(err, "can't open")
 	}
+	if !o.isComposite() {
+		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
+	}

 	var openOptions []fs.OpenOption
 	var offset, limit int64 = 0, -1
@@ -2173,57 +2241,57 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
 // handled by current implementation.
 // The version check below will then explicitly ask user to upgrade rclone.
 //
-func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
+func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
 	// Be strict about JSON format
 	// to reduce possibility that a random small file resembles metadata.
 	if data != nil && len(data) > maxMetadataSize {
-		return nil, errors.New("too big")
+		return nil, false, ErrMetaTooBig
 	}
 	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
-		return nil, errors.New("invalid json")
+		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON
 	err = json.Unmarshal(data, &metadata)
 	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
 	// Basic fields are strictly required
 	// to reduce possibility that a random small file resembles metadata.
 	if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
-		return nil, errors.New("missing required field")
+		return nil, false, errors.New("missing required field")
 	}
 	// Perform strict checks, avoid corruption of future metadata formats.
 	if *metadata.Version < 1 {
-		return nil, errors.New("wrong version")
+		return nil, false, errors.New("wrong version")
 	}
 	if *metadata.Size < 0 {
-		return nil, errors.New("negative file size")
+		return nil, false, errors.New("negative file size")
 	}
 	if *metadata.ChunkNum < 0 {
-		return nil, errors.New("negative number of chunks")
+		return nil, false, errors.New("negative number of chunks")
 	}
 	if *metadata.ChunkNum > maxSafeChunkNumber {
-		return nil, ErrChunkOverflow
+		return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
 	}
 	if metadata.MD5 != "" {
 		_, err = hex.DecodeString(metadata.MD5)
 		if len(metadata.MD5) != 32 || err != nil {
-			return nil, errors.New("wrong md5 hash")
+			return nil, false, errors.New("wrong md5 hash")
 		}
 	}
 	if metadata.SHA1 != "" {
 		_, err = hex.DecodeString(metadata.SHA1)
 		if len(metadata.SHA1) != 40 || err != nil {
-			return nil, errors.New("wrong sha1 hash")
+			return nil, false, errors.New("wrong sha1 hash")
 		}
 	}
 	// ChunkNum is allowed to be 0 in future versions
 	if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
-		return nil, errors.New("wrong number of chunks")
+		return nil, false, errors.New("wrong number of chunks")
 	}
-	// Non-strict mode also accepts future metadata versions
-	if *metadata.Version > metadataVersion && strictChecks {
-		return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
+	if *metadata.Version > metadataVersion {
+		return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
 	}

 	var nilFs *Fs // nil object triggers appropriate type method
@@ -2231,7 +2299,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte,
 	info.nChunks = *metadata.ChunkNum
 	info.md5 = metadata.MD5
 	info.sha1 = metadata.SHA1
-	return info, nil
+	return info, true, nil
 }

 func silentlyRemove(ctx context.Context, o fs.Object) {
backend/drive/drive.go

@@ -157,6 +157,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
 	return false
 }

+func driveOAuthOptions() []fs.Option {
+	opts := []fs.Option{}
+	for _, opt := range oauthutil.SharedOptions {
+		if opt.Name == config.ConfigClientID {
+			opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
+		}
+		opts = append(opts, opt)
+	}
+	return opts
+}
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -192,7 +203,7 @@ func init() {
 			log.Fatalf("Failed to configure team drive: %v", err)
 		}
 	},
-	Options: append(oauthutil.SharedOptions, []fs.Option{{
+	Options: append(driveOAuthOptions(), []fs.Option{{
 		Name: "scope",
 		Help: "Scope that rclone should use when requesting access from drive.",
 		Examples: []fs.OptionExample{{
backend/fichier/fichier.go

@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-	if size > int64(100e9) {
+	if size > int64(300e9) {
 		return nil, errors.New("File too big, cant upload")
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
backend/googlecloudstorage/googlecloudstorage.go

@@ -841,20 +841,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		remote: remote,
 	}

-	var newObject *storage.Object
-	err = f.pacer.Call(func() (bool, error) {
-		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
-		if !f.opt.BucketPolicyOnly {
-			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
-		}
-		newObject, err = copyObject.Context(ctx).Do()
-		return shouldRetry(err)
-	})
-	if err != nil {
-		return nil, err
+	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+	if !f.opt.BucketPolicyOnly {
+		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
+	}
+	var rewriteResponse *storage.RewriteResponse
+	for {
+		err = f.pacer.Call(func() (bool, error) {
+			rewriteResponse, err = rewriteRequest.Context(ctx).Do()
+			return shouldRetry(err)
+		})
+		if err != nil {
+			return nil, err
+		}
+		if rewriteResponse.Done {
+			break
+		}
+		rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
+		fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
 	}
 	// Set the metadata for the new object while we have it
-	dstObj.setMetaData(newObject)
+	dstObj.setMetaData(rewriteResponse.Resource)
 	return dstObj, nil
 }
backend/jottacloud/jottacloud.go

@@ -373,6 +373,9 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	fmt.Printf("Login Token> ")
 	loginToken := config.ReadLine()

+	m.Set(configClientID, "jottacli")
+	m.Set(configClientSecret, "")
+
 	token, err := doAuthV2(ctx, srv, loginToken, m)
 	if err != nil {
 		log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,7 +387,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

 	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 	if config.Confirm(false) {
-		oauthConfig.ClientID = "jottacli"
 		oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
 		if err != nil {
 			log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -551,7 +553,7 @@ func (f *Fs) setEndpointURL() {
 	if f.opt.Mountpoint == "" {
 		f.opt.Mountpoint = defaultMountpoint
 	}
-	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }

 // readMetaDataForPath reads the metadata from the path
@@ -728,6 +730,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
+		if err == fs.ErrorNotAFile {
+			err = nil
+		}
 		return err
 	})
@@ -1087,8 +1092,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-		retry, _ := shouldRetry(resp, err)
-		return (retry && resp.StatusCode != 500), err
+		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1192,18 +1196,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

 	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

-	// surprise! jottacloud fucked up dirmove - the api spits out an error but
-	// dir gets moved regardless
-	if apiErr, ok := err.(*api.Error); ok {
-		if apiErr.StatusCode == 500 {
-			_, err := f.NewObject(ctx, dstRemote)
-			if err == fs.ErrorNotAFile {
-				log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
-				return nil
-			}
-			return err
-		}
-	}
 	if err != nil {
 		return errors.Wrap(err, "couldn't move directory")
 	}
@@ -1477,6 +1469,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()
 	size := src.Size()
 	md5String, err := src.Hash(ctx, hash.MD5)
 	if err != nil || md5String == "" {
backend/local/local.go

@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 	// Set the file to be a sparse file (important on Windows)
 	err = file.SetSparse(out)
 	if err != nil {
-		fs.Debugf(o, "Failed to set sparse: %v", err)
+		fs.Errorf(o, "Failed to set sparse: %v", err)
 	}
 }
@@ -1231,6 +1231,15 @@ func (o *Object) setMetadata(info os.FileInfo) {
 	o.modTime = info.ModTime()
 	o.mode = info.Mode()
 	o.fs.objectMetaMu.Unlock()
+	// On Windows links read as 0 size so set the correct size here
+	if runtime.GOOS == "windows" && o.translatedLink {
+		linkdst, err := os.Readlink(o.path)
+		if err != nil {
+			fs.Errorf(o, "Failed to read link size: %v", err)
+		} else {
+			o.size = int64(len(linkdst))
+		}
+	}
 }

 // Stat an Object into info
@@ -6,7 +6,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
 	"testing"
 	"time"
 
@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {
 
 	// Object viewed as symlink
 	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
-	if runtime.GOOS == "windows" {
-		file2.Size = 0 // symlinks are 0 length under Windows
-	}
 
 	// Object viewed as destination
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
 	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
-	if runtime.GOOS == "windows" {
-		file3.Size = 0 // symlinks are 0 length under Windows
-	}
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
 		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
 	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
 	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-	if runtime.GOOS != "windows" {
-		assert.Equal(t, int64(8), o.Size())
-	}
+	assert.Equal(t, int64(8), o.Size())
 
 	// Check that NewObject doesn't see the non suffixed version
 	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -117,7 +117,7 @@ type ListItem struct {
 	Name      string `json:"name"`
 	Home      string `json:"home"`
 	Size      int64  `json:"size"`
-	Mtime     int64  `json:"mtime,omitempty"`
+	Mtime     uint64 `json:"mtime,omitempty"`
 	Hash      string `json:"hash,omitempty"`
 	VirusScan string `json:"virus_scan,omitempty"`
 	Tree      string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
 	Email string `json:"email"`
 }
 
-// ShardInfoResponse ...
-type ShardInfoResponse struct {
-	Email string `json:"email"`
-	Body  struct {
-		Video []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"video"`
-		ViewDirect []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view_direct"`
-		WeblinkView []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_view"`
-		WeblinkVideo []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_video"`
-		WeblinkGet []struct {
-			Count int    `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_get"`
-		Stock []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"stock"`
-		WeblinkThumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_thumbnails"`
-		PublicUpload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"public_upload"`
-		Auth []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"auth"`
-		Web []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"web"`
-		View []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view"`
-		Upload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"upload"`
-		Get []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"get"`
-		Thumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"thumbnails"`
-	} `json:"body"`
-	Time   int64 `json:"time"`
-	Status int   `json:"status"`
-}
-
 // CleanupResponse ...
 type CleanupResponse struct {
 	Email string `json:"email"`
@@ -37,6 +37,7 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 
 	"github.com/pkg/errors"
@@ -655,9 +656,14 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 	if err != nil {
 		return nil, -1, err
 	}
+	mTime := int64(item.Mtime)
+	if mTime < 0 {
+		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
+		mTime = 0
+	}
 	switch item.Kind {
 	case "folder":
-		dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
+		dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
 		dirSize := item.Count.Files + item.Count.Folders
 		return dir, dirSize, nil
 	case "file":
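This hunk pairs with the `Mtime int64` to `uint64` change in the API struct above: the server occasionally returns corrupt timestamps, and converting an out-of-range `uint64` back to `int64` wraps negative, which `time.Unix` would turn into a date before 1970. A standalone illustration of why the guard is needed (illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

// fixTime mirrors the clamping in the hunk above: convert, then clamp
// anything that wrapped negative to the Unix epoch.
func fixTime(raw uint64) time.Time {
	mTime := int64(raw) // wraps negative for values > math.MaxInt64
	if mTime < 0 {
		mTime = 0
	}
	return time.Unix(mTime, 0)
}

func main() {
	fmt.Println(fixTime(1600000000))           // a sane timestamp
	fmt.Println(fixTime(18446744073709551615)) // corrupt value, clamped
}
```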
@@ -671,7 +677,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 			hasMetaData: true,
 			size:        item.Size,
 			mrHash:      binHash,
-			modTime:     time.Unix(item.Mtime, 0),
+			modTime:     time.Unix(mTime, 0),
 		}
 		return file, -1, nil
 	default:
@@ -1861,30 +1867,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
 		return f.shardURL, nil
 	}
 
-	token, err := f.accessToken()
-	if err != nil {
-		return "", err
-	}
-
 	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/api/m1/dispatcher",
-		Parameters: url.Values{
-			"client_id":    {api.OAuthClientID},
-			"access_token": {token},
-		},
+		RootURL: api.DispatchServerURL,
+		Method:  "GET",
+		Path:    "/u",
 	}
 
-	var info api.ShardInfoResponse
+	var (
+		res *http.Response
+		url string
+		err error
+	)
 	err = f.pacer.Call(func() (bool, error) {
-		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(res, err, f, &opts)
+		res, err = f.srv.Call(ctx, &opts)
+		if err == nil {
+			url, err = readBodyWord(res)
+		}
+		return fserrors.ShouldRetry(err), err
 	})
 	if err != nil {
+		closeBody(res)
 		return "", err
 	}
 
-	f.shardURL = info.Body.Upload[0].URL
+	f.shardURL = url
 	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
 	fs.Debugf(f, "new upload shard: %s", f.shardURL)
 
@@ -2116,7 +2122,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}
 
-	start, end, partial := getTransferRange(o.size, options...)
+	start, end, partialRequest := getTransferRange(o.size, options...)
 
+	headers := map[string]string{
+		"Accept":       "*/*",
+		"Content-Type": "application/octet-stream",
+	}
+	if partialRequest {
+		rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
+		headers["Range"] = rangeStr
+		// headers["Content-Range"] = rangeStr
+		headers["Accept-Ranges"] = "bytes"
+	}
+
 	// TODO: set custom timeouts
 	opts := rest.Opts{
@@ -2127,10 +2144,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			"client_id": {api.OAuthClientID},
 			"token":     {token},
 		},
-		ExtraHeaders: map[string]string{
-			"Accept": "*/*",
-			"Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
-		},
+		ExtraHeaders: headers,
 	}
 
 	var res *http.Response
@@ -2151,18 +2165,36 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}
 
-	var hasher gohash.Hash
-	if !partial {
+	// Server should respond with Status 206 and Content-Range header to a range
+	// request. Status 200 (and no Content-Range) means a full-content response.
+	partialResponse := res.StatusCode == 206
+
+	var (
+		hasher     gohash.Hash
+		wrapStream io.ReadCloser
+	)
+	if !partialResponse {
 		// Cannot check hash of partial download
 		hasher = mrhash.New()
 	}
-	wrapStream := &endHandler{
+	wrapStream = &endHandler{
 		ctx:    ctx,
 		stream: res.Body,
 		hasher: hasher,
 		o:      o,
 		server: server,
 	}
+	if partialRequest && !partialResponse {
+		fs.Debugf(o, "Server returned full content instead of range")
+		if start > 0 {
+			// Discard the beginning of the data
+			_, err = io.CopyN(ioutil.Discard, wrapStream, start)
+			if err != nil {
+				return nil, err
+			}
+		}
+		wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
+	}
 	return wrapStream, nil
 }
 
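The new `partialRequest`/`partialResponse` logic covers servers that ignore the `Range` header and reply `200` with the full body. A minimal stdlib-only sketch of the same fallback (the real code uses rclone's `readers.NewLimitedReadCloser` so that `Close` still reaches the underlying response body):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// rangeFallback drops the first `start` bytes of a full-content response
// and caps what the caller can read at the originally requested length.
func rangeFallback(body io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			return nil, err
		}
	}
	return io.LimitReader(body, end-start), nil
}

func main() {
	full := strings.NewReader("0123456789") // pretend 200 response body
	r, _ := rangeFallback(full, 2, 6)       // we asked for bytes 2-5
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", b) // "2345"
}
```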
@@ -1247,6 +1247,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, errors.Wrap(err, "about failed")
 	}
 	q := drive.Quota
+	// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
+	if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
+		return &fs.Usage{}, nil
+	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
 		Used:  fs.NewUsageValue(q.Used),  // bytes in use
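Returning `&fs.Usage{}` works here because `fs.Usage` uses pointer fields: a nil field means "unknown" rather than zero bytes, so `rclone about` reports nothing instead of a bogus empty quota. A hypothetical mirror of that convention:

```go
package main

import "fmt"

// Usage mimics the fs.Usage convention: a nil pointer means unknown.
type Usage struct {
	Total *int64
	Used  *int64
}

func report(u *Usage) {
	if u.Total == nil {
		fmt.Println("total: unknown")
		return
	}
	fmt.Println("total:", *u.Total)
}

func main() {
	report(&Usage{}) // the sharepoint case above: everything unknown
	total := int64(1 << 40)
	report(&Usage{Total: &total}) // the normal case
}
```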
@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 
 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	400, // Bad request (seen in "Next token is expired")
 	401, // Unauthorized (seen in "Token has expired")
 	408, // Request Timeout
 	423, // Locked - get this on folders sometimes
@@ -104,8 +104,9 @@ type ItemResult struct {
 
 // Hashes contains the supported hashes
 type Hashes struct {
 	SHA1   string `json:"sha1"`
 	MD5    string `json:"md5"`
+	SHA256 string `json:"sha256"`
 }
 
 // UploadFileResponse is the response from /uploadfile
@@ -885,6 +885,13 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
+	// EU region supports SHA1 and SHA256 (but rclone doesn't
+	// support SHA256 yet).
+	//
+	// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
+	if f.opt.Hostname == "eapi.pcloud.com" {
+		return hash.Set(hash.SHA1)
+	}
 	return hash.Set(hash.MD5 | hash.SHA1)
 }
 
181  backend/s3/s3.go
@@ -58,7 +58,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
+		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -94,6 +94,9 @@ func init() {
 		}, {
 			Value: "StackPath",
 			Help:  "StackPath Object Storage",
+		}, {
+			Value: "TencentCOS",
+			Help:  "Tencent Cloud Object Storage (COS)",
 		}, {
 			Value: "Wasabi",
 			Help:  "Wasabi Object Storage",
@@ -119,6 +122,9 @@ func init() {
 			Name: "secret_access_key",
 			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
+			// References:
+			// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
+			// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
 			Name:     "region",
 			Help:     "Region to connect to.",
 			Provider: "AWS",
@@ -128,12 +134,12 @@ func init() {
 			}, {
 				Value: "us-east-2",
 				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
-			}, {
-				Value: "us-west-2",
-				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 			}, {
 				Value: "us-west-1",
 				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
+			}, {
+				Value: "us-west-2",
+				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 			}, {
 				Value: "ca-central-1",
 				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -143,9 +149,15 @@ func init() {
 			}, {
 				Value: "eu-west-2",
 				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
+			}, {
+				Value: "eu-west-3",
+				Help:  "EU (Paris) Region\nNeeds location constraint eu-west-3.",
 			}, {
 				Value: "eu-north-1",
 				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
+			}, {
+				Value: "eu-south-1",
+				Help:  "EU (Milan) Region\nNeeds location constraint eu-south-1.",
 			}, {
 				Value: "eu-central-1",
 				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -161,15 +173,36 @@ func init() {
 			}, {
 				Value: "ap-northeast-2",
 				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
+			}, {
+				Value: "ap-northeast-3",
+				Help:  "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
 			}, {
 				Value: "ap-south-1",
 				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
 			}, {
 				Value: "ap-east-1",
-				Help:  "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
+				Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
 			}, {
 				Value: "sa-east-1",
 				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
+			}, {
+				Value: "me-south-1",
+				Help:  "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
+			}, {
+				Value: "af-south-1",
+				Help:  "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
+			}, {
+				Value: "cn-north-1",
+				Help:  "China (Beijing) Region\nNeeds location constraint cn-north-1.",
+			}, {
+				Value: "cn-northwest-1",
+				Help:  "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
+			}, {
+				Value: "us-gov-east-1",
+				Help:  "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
+			}, {
+				Value: "us-gov-west-1",
+				Help:  "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
 			}},
 		}, {
 			Name: "region",
@@ -185,7 +218,7 @@ func init() {
 		}, {
 			Name:     "region",
 			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
-			Provider: "!AWS,Alibaba,Scaleway",
+			Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
 			Examples: []fs.OptionExample{{
 				Value: "",
 				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -476,10 +509,73 @@ func init() {
 				Value: "s3.eu-central-1.stackpathstorage.com",
 				Help:  "EU Endpoint",
 			}},
+		}, {
+			// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
+			Name:     "endpoint",
+			Help:     "Endpoint for Tencent COS API.",
+			Provider: "TencentCOS",
+			Examples: []fs.OptionExample{{
+				Value: "cos.ap-beijing.myqcloud.com",
+				Help:  "Beijing Region.",
+			}, {
+				Value: "cos.ap-nanjing.myqcloud.com",
+				Help:  "Nanjing Region.",
+			}, {
+				Value: "cos.ap-shanghai.myqcloud.com",
+				Help:  "Shanghai Region.",
+			}, {
+				Value: "cos.ap-guangzhou.myqcloud.com",
+				Help:  "Guangzhou Region.",
+			}, {
+				Value: "cos.ap-nanjing.myqcloud.com",
+				Help:  "Nanjing Region.",
+			}, {
+				Value: "cos.ap-chengdu.myqcloud.com",
+				Help:  "Chengdu Region.",
+			}, {
+				Value: "cos.ap-chongqing.myqcloud.com",
+				Help:  "Chongqing Region.",
+			}, {
+				Value: "cos.ap-hongkong.myqcloud.com",
+				Help:  "Hong Kong (China) Region.",
+			}, {
+				Value: "cos.ap-singapore.myqcloud.com",
+				Help:  "Singapore Region.",
+			}, {
+				Value: "cos.ap-mumbai.myqcloud.com",
+				Help:  "Mumbai Region.",
+			}, {
+				Value: "cos.ap-seoul.myqcloud.com",
+				Help:  "Seoul Region.",
+			}, {
+				Value: "cos.ap-bangkok.myqcloud.com",
+				Help:  "Bangkok Region.",
+			}, {
+				Value: "cos.ap-tokyo.myqcloud.com",
+				Help:  "Tokyo Region.",
+			}, {
+				Value: "cos.na-siliconvalley.myqcloud.com",
+				Help:  "Silicon Valley Region.",
+			}, {
+				Value: "cos.na-ashburn.myqcloud.com",
+				Help:  "Virginia Region.",
+			}, {
+				Value: "cos.na-toronto.myqcloud.com",
+				Help:  "Toronto Region.",
+			}, {
+				Value: "cos.eu-frankfurt.myqcloud.com",
+				Help:  "Frankfurt Region.",
+			}, {
+				Value: "cos.eu-moscow.myqcloud.com",
+				Help:  "Moscow Region.",
+			}, {
+				Value: "cos.accelerate.myqcloud.com",
+				Help:  "Use Tencent COS Accelerate Endpoint.",
+			}},
 		}, {
 			Name:     "endpoint",
 			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
-			Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
 			Examples: []fs.OptionExample{{
 				Value: "objects-us-east-1.dream.io",
 				Help:  "Dream Objects endpoint",
@@ -519,12 +615,12 @@ func init() {
 			}, {
 				Value: "us-east-2",
 				Help:  "US East (Ohio) Region.",
-			}, {
-				Value: "us-west-2",
-				Help:  "US West (Oregon) Region.",
 			}, {
 				Value: "us-west-1",
 				Help:  "US West (Northern California) Region.",
+			}, {
+				Value: "us-west-2",
+				Help:  "US West (Oregon) Region.",
 			}, {
 				Value: "ca-central-1",
 				Help:  "Canada (Central) Region.",
@@ -534,9 +630,15 @@ func init() {
 			}, {
 				Value: "eu-west-2",
 				Help:  "EU (London) Region.",
+			}, {
+				Value: "eu-west-3",
+				Help:  "EU (Paris) Region.",
 			}, {
 				Value: "eu-north-1",
 				Help:  "EU (Stockholm) Region.",
+			}, {
+				Value: "eu-south-1",
+				Help:  "EU (Milan) Region.",
 			}, {
 				Value: "EU",
 				Help:  "EU Region.",
@@ -551,16 +653,37 @@ func init() {
 				Help:  "Asia Pacific (Tokyo) Region.",
 			}, {
 				Value: "ap-northeast-2",
-				Help:  "Asia Pacific (Seoul)",
+				Help:  "Asia Pacific (Seoul) Region.",
+			}, {
+				Value: "ap-northeast-3",
+				Help:  "Asia Pacific (Osaka-Local) Region.",
 			}, {
 				Value: "ap-south-1",
-				Help:  "Asia Pacific (Mumbai)",
+				Help:  "Asia Pacific (Mumbai) Region.",
 			}, {
 				Value: "ap-east-1",
-				Help:  "Asia Pacific (Hong Kong)",
+				Help:  "Asia Pacific (Hong Kong) Region.",
 			}, {
 				Value: "sa-east-1",
 				Help:  "South America (Sao Paulo) Region.",
+			}, {
+				Value: "me-south-1",
+				Help:  "Middle East (Bahrain) Region.",
+			}, {
+				Value: "af-south-1",
+				Help:  "Africa (Cape Town) Region.",
+			}, {
+				Value: "cn-north-1",
+				Help:  "China (Beijing) Region",
+			}, {
+				Value: "cn-northwest-1",
+				Help:  "China (Ningxia) Region.",
+			}, {
+				Value: "us-gov-east-1",
+				Help:  "AWS GovCloud (US-East) Region.",
+			}, {
+				Value: "us-gov-west-1",
+				Help:  "AWS GovCloud (US) Region.",
 			}},
 		}, {
 			Name: "location_constraint",
@@ -666,7 +789,7 @@ func init() {
 		}, {
 			Name:     "location_constraint",
 			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
-			Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+			Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
 		}, {
 			Name: "acl",
 			Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -678,9 +801,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
 Note that this ACL is applied when server side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.`,
 			Examples: []fs.OptionExample{{
+				Value:    "default",
+				Help:     "Owner gets Full_CONTROL. No one else has access rights (default).",
+				Provider: "TencentCOS",
+			}, {
 				Value:    "private",
 				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
-				Provider: "!IBMCOS",
+				Provider: "!IBMCOS,TencentCOS",
 			}, {
 				Value: "public-read",
 				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -842,6 +969,24 @@ isn't set then "acl" is used instead.`,
 				Value: "STANDARD_IA",
 				Help:  "Infrequent access storage mode.",
 			}},
+		}, {
+			// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
+			Name:     "storage_class",
+			Help:     "The storage class to use when storing new objects in Tencent COS.",
+			Provider: "TencentCOS",
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "Default",
+			}, {
+				Value: "STANDARD",
+				Help:  "Standard storage class",
+			}, {
+				Value: "ARCHIVE",
+				Help:  "Archive storage mode.",
+			}, {
+				Value: "STANDARD_IA",
+				Help:  "Infrequent access storage mode.",
+			}},
 		}, {
 			// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
 			Name:     "storage_class",
@@ -975,7 +1120,7 @@ if false then rclone will use virtual path style. See [the AWS S3
 docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
 for more info.
 
-Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
 false - rclone will do this automatically based on the provider
 setting.`,
 			Default: true,
@@ -1305,7 +1450,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	if opt.Region == "" {
 		opt.Region = "us-east-1"
 	}
-	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
+	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
 		opt.ForcePathStyle = false
 	}
 	if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
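For context on the `ForcePathStyle` line: an S3 request can address a bucket either in the host name (virtual-hosted style) or in the path. AWS-like providers, now including Tencent COS, expect virtual-hosted style, while many S3 clones only route path style. An illustration of the two URL shapes:

```go
package main

import "fmt"

// objectURL shows the two S3 addressing styles toggled by ForcePathStyle.
func objectURL(endpoint, bucket, key string, pathStyle bool) string {
	if pathStyle {
		return fmt.Sprintf("https://%s/%s/%s", endpoint, bucket, key)
	}
	// virtual-hosted style: bucket becomes part of the host name
	return fmt.Sprintf("https://%s.%s/%s", bucket, endpoint, key)
}

func main() {
	fmt.Println(objectURL("s3.amazonaws.com", "mybucket", "a/b.txt", true))
	fmt.Println(objectURL("s3.amazonaws.com", "mybucket", "a/b.txt", false))
}
```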
@@ -1587,7 +1732,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	//
 	// So we enable only on providers we know supports it properly, all others can retry when a
 	// XML Syntax error is detected.
-	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
+	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
 	for {
 		// FIXME need to implement ALL loop
 		req := s3.ListObjectsInput{
@@ -46,7 +46,7 @@ type Library struct {
 	Encrypted bool   `json:"encrypted"`
 	Owner     string `json:"owner"`
 	ID        string `json:"id"`
-	Size      int    `json:"size"`
+	Size      int64  `json:"size"`
 	Name      string `json:"name"`
 	Modified  int64  `json:"mtime"`
 }
@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro
 
 	for _, library := range libraries {
 		d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
-		d.SetSize(int64(library.Size))
+		d.SetSize(library.Size)
 		entries = append(entries, d)
 	}
 
@@ -1087,7 +1087,7 @@ func shellEscape(str string) string {
 func parseHash(bytes []byte) string {
 	// For strings with backslash *sum writes a leading \
 	// https://unix.stackexchange.com/q/313733/94054
-	return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
+	return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
 }
 
 // Parses the byte array output from the SSH session
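The `strings.ToLower` wrapper matters because rclone compares hash strings byte for byte and some remote `md5sum`/`sha1sum` implementations emit uppercase digests; the leading `\` strip handles the escape `*sum` tools prepend for filenames containing backslashes. A standalone demonstration of the fixed parser:

```go
package main

import (
	"fmt"
	"strings"
)

// parseHash strips the *sum backslash escape marker, takes the digest
// before the first space, and normalises it to lowercase.
func parseHash(b []byte) string {
	return strings.ToLower(strings.Split(strings.TrimLeft(string(b), "\\"), " ")[0])
}

func main() {
	out := []byte("\\D41D8CD98F00B204E9800998ECF8427E  my\\file.txt\n")
	fmt.Println(parseHash(out)) // d41d8cd98f00b204e9800998ecf8427e
}
```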
@@ -3,7 +3,6 @@ package policy
 import (
 	"context"
 	"math/rand"
-	"time"
 
 	"github.com/rclone/rclone/backend/union/upstream"
 	"github.com/rclone/rclone/fs"
@@ -20,12 +19,10 @@ type EpRand struct {
 }
 
 func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
-	rand.Seed(time.Now().Unix())
 	return upstreams[rand.Intn(len(upstreams))]
 }
 
 func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
-	rand.Seed(time.Now().Unix())
 	return entries[rand.Intn(len(entries))]
 }
 
@@ -145,11 +145,16 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the root directory of the Fs object
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	upstreams, err := f.create(ctx, dir)
-	if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
-		if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
-			return err
+	if err == fs.ErrorObjectNotFound {
+		if dir != parentDir(dir) {
+			if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
+				return err
+			}
+			upstreams, err = f.create(ctx, dir)
+		} else if dir == "" {
+			// If root dirs not created then create them
+			upstreams, err = f.upstreams, nil
 		}
-		upstreams, err = f.create(ctx, dir)
 	}
 	if err != nil {
 		return err
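The restructured branch handles the termination case: for the root, `parentDir("") == ""`, so the old guard `dir != parentDir(dir)` skipped the whole block and the root upstream directories were never created. A tiny illustration of that fixed point (using a stand-in for rclone's `parentDir` helper, which may differ in detail):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// parentDir is a stand-in for the union backend's helper: it maps the
// root to itself, which is the condition the Mkdir fix keys off.
func parentDir(dir string) string {
	parent := path.Dir(strings.TrimSuffix(dir, "/"))
	if parent == "." || parent == "/" {
		parent = ""
	}
	return parent
}

func main() {
	fmt.Printf("%q\n", parentDir("a/b/c")) // "a/b"
	fmt.Printf("%q\n", parentDir("a"))     // ""
	fmt.Printf("%q\n", parentDir(""))      // "" -> recursion must stop here
}
```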
@@ -818,6 +823,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
+	fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
 	var features = (&fs.Features{
 		CaseInsensitive: true,
 		DuplicateFiles:  false,
@@ -1129,10 +1129,14 @@ func (o *Object) Storable() bool {
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var resp *http.Response
+	fs.FixRangeOption(options, o.size)
 	opts := rest.Opts{
 		Method:  "GET",
 		Path:    o.filePath(),
 		Options: options,
+		ExtraHeaders: map[string]string{
+			"Depth": "0",
+		},
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
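A hedged sketch of what `fs.FixRangeOption(options, o.size)` is for (an assumption based on its name and arguments, not a copy of the real helper): a suffix range such as "the last n bytes" can only be turned into an absolute range once the object size is known, and some WebDAV servers only honour absolute byte ranges:

```go
package main

import "fmt"

// normalize turns (start=-1, end=n) meaning "last n bytes" into an
// absolute [start, end) pair for an object of the given size.
// Hypothetical convention for illustration only.
func normalize(start, end, size int64) (int64, int64) {
	if start == -1 { // suffix range
		start = size - end
		if start < 0 {
			start = 0
		}
		end = size
	}
	return start, end
}

func main() {
	fmt.Println(normalize(-1, 500, 10000)) // 9500 10000
	fmt.Println(normalize(0, 500, 10000))  // 0 500
}
```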
@@ -36,6 +36,7 @@ var (
 	cgo         = flag.Bool("cgo", false, "Use cgo for the build")
 	noClean     = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
 	tags        = flag.String("tags", "", "Space separated list of build tags")
+	buildmode   = flag.String("buildmode", "", "Passed to go build -buildmode flag")
 	compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
 )
 
@@ -280,7 +281,7 @@ func stripVersion(goarch string) string {
 
 // build the binary in dir returning success or failure
 func compileArch(version, goos, goarch, dir string) bool {
-	log.Printf("Compiling %s/%s", goos, goarch)
+	log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
 	output := filepath.Join(dir, "rclone")
 	if goos == "windows" {
 		output += ".exe"
@@ -298,11 +299,17 @@ func compileArch(version, goos, goarch, dir string) bool {
 		"go", "build",
 		"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
 		"-trimpath",
-		"-i",
 		"-o", output,
 		"-tags", *tags,
-		"..",
 	}
+	if *buildmode != "" {
+		args = append(args,
+			"-buildmode", *buildmode,
+		)
+	}
+	args = append(args,
+		"..",
+	)
 	env := []string{
 		"GOOS=" + goos,
 		"GOARCH=" + stripVersion(goarch),
@@ -325,7 +332,7 @@ func compileArch(version, goos, goarch, dir string) bool {
 	artifacts := []string{buildZip(dir)}
 	// build a .deb and .rpm if appropriate
 	if goos == "linux" {
-		artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
+		artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
 	}
 	if *copyAs != "" {
 		for _, artifact := range artifacts {
@@ -15,10 +15,12 @@ description: |
 vendor: "rclone"
 homepage: "https://rclone.org"
 license: "MIT"
-# No longer supported? See https://github.com/goreleaser/nfpm/issues/144
-# bindir: "/usr/bin"
-files:
-  ./rclone: "/usr/bin/rclone"
-  ./README.html: "/usr/share/doc/rclone/README.html"
-  ./README.txt: "/usr/share/doc/rclone/README.txt"
-  ./rclone.1: "/usr/share/man/man1/rclone.1"
+contents:
+  - src: ./rclone
+    dst: /usr/bin/rclone
+  - src: ./README.html
+    dst: /usr/share/doc/rclone/README.html
+  - src: ./README.txt
+    dst: /usr/share/doc/rclone/README.txt
+  - src: ./rclone.1
+    dst: /usr/share/man/man1/rclone.1
@@ -29,6 +29,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
+	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
 	AddFlags(cmdFlags)
 }
 
@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.
 
-The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
+The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
 and |--error| flags write paths, one per line, to the file name (or
 stdout if it is |-|) supplied. What they write is described in the
 help below. For example |--differ| will write all paths which are
@@ -14,7 +14,7 @@ func init() {
 
 var commandDefinition = &cobra.Command{
 	Use:   "cleanup remote:path",
-	Short: `Clean up the remote if possible`,
+	Short: `Clean up the remote if possible.`,
 	Long: `
 Clean up the remote if possible. Empty the trash or delete old file
 versions. Not supported by all remotes.
@@ -9,7 +9,6 @@ package cmd
 import (
 	"fmt"
 	"log"
-	"math/rand"
 	"os"
 	"os/exec"
 	"path"
@@ -35,6 +34,7 @@ import (
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/fs/rc/rcserver"
 	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )
@@ -512,7 +512,9 @@ func AddBackendFlags() {
 
 // Main runs rclone interpreting flags and commands out of os.Args
 func Main() {
-	rand.Seed(time.Now().Unix())
+	if err := random.Seed(); err != nil {
+		log.Fatalf("Fatal error: %v", err)
+	}
 	setupRootCommand(Root)
 	AddBackendFlags()
 	if err := Root.Execute(); err != nil {
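This replaces the time-based seed with a crypto-strong one; the changelog at the end of this diff ties it to CVE-2020-28924, and the union policy hunks above remove the per-call `rand.Seed(time.Now().Unix())` so seeding happens in this one place. A minimal sketch of what a helper like `lib/random.Seed` plausibly does (assumption: the real implementation may differ):

```go
package main

import (
	cryptorand "crypto/rand"
	"encoding/binary"
	"fmt"
	mathrand "math/rand"
)

// seed feeds math/rand with 8 bytes drawn from the OS CSPRNG, so later
// math/rand output is not predictable from the startup time.
func seed() error {
	var b [8]byte
	if _, err := cryptorand.Read(b[:]); err != nil {
		return fmt.Errorf("failed to read random seed: %w", err)
	}
	mathrand.Seed(int64(binary.LittleEndian.Uint64(b[:])))
	return nil
}

func main() {
	if err := seed(); err != nil {
		panic(err)
	}
	fmt.Println(mathrand.Intn(100))
}
```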
32  cmd/cmount/mount_brew.go  (new file)
@@ -0,0 +1,32 @@
+// Build for macos with the brew tag to handle the absence
+// of fuse and print an appropriate error message
+
+// +build brew
+// +build darwin
+
+package cmount
+
+import (
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/vfs"
+)
+
+func init() {
+	name := "mount"
+	cmd := mountlib.NewMountCommand(name, false, mount)
+	cmd.Aliases = append(cmd.Aliases, "cmount")
+	mountlib.AddRc("cmount", mount)
+}
+
+// mount the file system
+//
+// The mount point will be ready when this returns.
+//
+// returns an error, and an error channel for the serve process to
+// report an error when fusermount is called.
+func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
+	return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
+		"Please install the binaries available at https://rclone." +
+		"org/downloads/ instead if you want to use the mount command")
+}
@@ -1,6 +1,6 @@
 // Build for cmount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
+// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
 
 package cmount
@@ -22,7 +22,7 @@ func init() {
 
 var commandDefinition = &cobra.Command{
 	Use:   "copy source:path dest:path",
-	Short: `Copy files from source to dest, skipping already copied`,
+	Short: `Copy files from source to dest, skipping already copied.`,
 	Long: `
 Copy the source to the destination. Doesn't transfer
 unchanged files, testing by size and modification time or
@@ -15,7 +15,7 @@ func init() {
 
 var commandDefinition = &cobra.Command{
 	Use:   "copyto source:path dest:path",
-	Short: `Copy files from source to dest, skipping already copied`,
+	Short: `Copy files from source to dest, skipping already copied.`,
 	Long: `
 If source:path is a file or directory then it copies it to a file or
 directory named dest:path.
@@ -44,7 +44,7 @@ func init() {
 
 var commandDefinition = &cobra.Command{
 	Use:   "lsf remote:path",
-	Short: `List directories and objects in remote:path formatted for parsing`,
+	Short: `List directories and objects in remote:path formatted for parsing.`,
 	Long: `
 List the contents of the source path (directories and objects) to
 standard output in a form which is easy to parse by scripts. By
@@ -107,6 +107,13 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
 	if err != nil {
 		return nil, translateError(err)
 	}
+	dirents = append(dirents, fuse.Dirent{
+		Type: fuse.DT_Dir,
+		Name: ".",
+	}, fuse.Dirent{
+		Type: fuse.DT_Dir,
+		Name: "..",
+	})
 	for _, node := range items {
 		name := node.Name()
 		if len(name) > mountlib.MaxLeafSize {
@@ -1,7 +1,7 @@
 // Build for mount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
+// Invert the build constraint: linux,go1.13 freebsd,go1.13
 //
 // !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
 // == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13))
@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
 	modTime := node.ModTime()
 	// set attributes
 	vfs := node.VFS()
-	attr.Owner.Gid = vfs.Opt.UID
-	attr.Owner.Uid = vfs.Opt.GID
+	attr.Owner.Gid = vfs.Opt.GID
+	attr.Owner.Uid = vfs.Opt.UID
 	attr.Mode = getMode(node)
 	attr.Size = Size
 	attr.Nlink = 1
@@ -192,6 +192,9 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount
 
+**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.
+
 ### Installing on Windows
 
 To run rclone ` + commandName + ` on Windows, you will need to
@@ -333,9 +336,6 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
 parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
-
-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
 ` + vfs.Help,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
@@ -17,7 +17,7 @@ func init() {
 
 var commandDefinition = &cobra.Command{
 	Use:   "obscure password",
-	Short: `Obscure password for use in the rclone config file`,
+	Short: `Obscure password for use in the rclone config file.`,
 	Long: `In the rclone config file, human readable passwords are
 obscured. Obscuring them is done by encrypting them and writing them
 out in base64. This is **not** a secure way of encrypting these
@@ -1,6 +1,7 @@
 package http
 
 import (
+	"io"
 	"net/http"
 	"os"
 	"path"
@@ -172,8 +173,11 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 	obj := entry.(fs.Object)
 	file := node.(*vfs.File)
 
-	// Set content length since we know how long the object is
-	w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
+	// Set content length if we know how long the object is
+	knownSize := obj.Size() >= 0
+	if knownSize {
+		w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
+	}
 
 	// Set content type
 	mimeType := fs.MimeType(r.Context(), obj)
@@ -210,5 +214,19 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 	// FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer
 
 	// Serve the file
-	http.ServeContent(w, r, remote, node.ModTime(), in)
+	if knownSize {
+		http.ServeContent(w, r, remote, node.ModTime(), in)
+	} else {
+		// http.ServeContent can't serve unknown length files
+		if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
+			http.Error(w, "Can't use Range: on files of unknown length", http.StatusRequestedRangeNotSatisfiable)
+			return
+		}
+		n, err := io.Copy(w, in)
+		if err != nil {
+			fs.Errorf(obj, "Didn't finish writing GET request (wrote %d/unknown bytes): %v", n, err)
+			return
+		}
+	}
+
 }
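Why the split: `http.ServeContent` implements `Range` and conditional requests, but it needs the size (it seeks to the end of its `io.ReadSeeker`), so a stream of unknown length has to reject range requests and be copied straight through. A runnable standalone version of the same distinction:

```go
package main

import (
	"io"
	"net/http"
	"strings"
	"time"
)

func handler(w http.ResponseWriter, r *http.Request) {
	known := r.URL.Query().Get("known") == "1"
	if known {
		// Seekable content with a known size: full Range support.
		http.ServeContent(w, r, "file.txt", time.Now(), strings.NewReader("hello world"))
		return
	}
	// Unknown length: refuse ranges, then stream the body.
	if r.Header.Get("Range") != "" {
		http.Error(w, "can't seek in a stream of unknown length", http.StatusRequestedRangeNotSatisfiable)
		return
	}
	_, _ = io.Copy(w, strings.NewReader("hello world"))
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```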
@@ -75,6 +75,39 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
 	return VFS
 }
 
+// Accept a single connection - run in a go routine as the ssh
+// authentication can block
+func (s *server) acceptConnection(nConn net.Conn) {
+	what := describeConn(nConn)
+
+	// Before use, a handshake must be performed on the incoming net.Conn.
+	sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
+	if err != nil {
+		fs.Errorf(what, "SSH login failed: %v", err)
+		return
+	}
+
+	fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
+
+	// Discard all global out-of-band Requests
+	go ssh.DiscardRequests(reqs)
+
+	c := &conn{
+		what: what,
+		vfs:  s.getVFS(what, sshConn),
+	}
+	if c.vfs == nil {
+		fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
+		_ = nConn.Close()
+		return
+	}
+	c.handlers = newVFSHandler(c.vfs)
+
+	// Accept all channels
+	go c.handleChannels(chans)
+}
+
+// Accept connections and call them in a go routine
 func (s *server) acceptConnections() {
 	for {
 		nConn, err := s.listener.Accept()
@ -85,33 +118,7 @@ func (s *server) acceptConnections() {
|
||||||
fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
|
fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
what := describeConn(nConn)
|
go s.acceptConnection(nConn)
|
||||||
|
|
||||||
// Before use, a handshake must be performed on the incoming net.Conn.
|
|
||||||
sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
|
|
||||||
if err != nil {
|
|
||||||
fs.Errorf(what, "SSH login failed: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
|
|
||||||
|
|
||||||
// Discard all global out-of-band Requests
|
|
||||||
go ssh.DiscardRequests(reqs)
|
|
||||||
|
|
||||||
c := &conn{
|
|
||||||
what: what,
|
|
||||||
vfs: s.getVFS(what, sshConn),
|
|
||||||
}
|
|
||||||
if c.vfs == nil {
|
|
||||||
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
|
|
||||||
_ = nConn.Close()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
c.handlers = newVFSHandler(c.vfs)
|
|
||||||
|
|
||||||
// Accept all channels
|
|
||||||
go c.handleChannels(chans)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
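Factoring out `acceptConnection` matters because `ssh.NewServerConn` blocks until authentication completes, so one slow or stalled login previously held up every other incoming connection. The general Go idiom, sketched here without the SSH specifics:

```go
package main

import (
	"log"
	"net"
)

func handle(c net.Conn) {
	defer c.Close()
	// Authenticate and serve here; a slow handshake now only
	// affects this one connection.
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Printf("accept: %v", err)
			continue
		}
		go handle(conn) // keep the accept loop free
	}
}
```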
@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
 {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
 {{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
+{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
 {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
 {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
 {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
@@ -404,6 +404,7 @@ Note that Box is case insensitive so you can't have a file called
 "Hello.doc" and one called "hello.doc".

 Box file names can't have the `\` character in. rclone maps this to
-and from an identical looking unicode equivalent `＼`.
+and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
+Reverse Solidus).

 Box only supports filenames up to 255 characters in length.
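For illustration, the mapping described above can be reproduced with a simple character replacer. rclone's real implementation lives in its filename-encoding layer, so treat this as a sketch of the idea only:

```go
package main

import (
	"fmt"
	"strings"
)

// `\` is forbidden in Box file names, so swap it with the look-alike
// U+FF3C Fullwidth Reverse Solidus on upload, and back on listing.
var (
	encode = strings.NewReplacer(`\`, "\uff3c")
	decode = strings.NewReplacer("\uff3c", `\`)
)

func main() {
	name := `back\slash.txt`
	boxName := encode.Replace(name)
	fmt.Println(boxName)                         // back＼slash.txt
	fmt.Println(decode.Replace(boxName) == name) // true
}
```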
@@ -5,6 +5,100 @@ description: "Rclone Changelog"

 # Changelog

+## v1.53.3 - 2020-11-19
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)
+
+* Bug Fixes
+    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
+        * Passwords you have generated with `rclone config` may be insecure
+        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
+    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
+* VFS
+    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
+* Sharefile
+    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)
+
+## v1.53.2 - 2020-10-26
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
+
+* Bug Fixes
+    * accounting
+        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
+        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
+    * operations
+        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
+        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
+    * build
+        * Work around GitHub actions brew problem (Nick Craig-Wood)
+        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
+* Mount
+    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
+* VFS
+    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
+    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
+    * Fix a race condition in retryFailedResets (Leo Luan)
+    * Fix missed concurrency control between some item operations and reset (Leo Luan)
+    * Add exponential backoff during ENOSPC retries (Leo Luan)
+    * Add a missed update of used cache space (Leo Luan)
+    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
+* Local
+    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
+* Chunker
+    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
+    * Fix upload over crypt (Ivan Andreev)
+* Fichier
+    * Increase maximum file size from 100GB to 300GB (gyutw)
+* Jottacloud
+    * Remove clientSecret from config when upgrading to token based authentication (buengese)
+    * Avoid double url escaping of device/mountpoint (albertony)
+    * Remove DirMove workaround as it's not required anymore - also (buengese)
+* Mailru
+    * Fix uploads after recent changes on server (Ivan Andreev)
+    * Fix range requests after june changes on server (Ivan Andreev)
+    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
+* Onedrive
+    * Fix disk usage for sharepoint (Nick Craig-Wood)
+* S3
+    * Add missing regions for AWS (Anagh Kumar Baranwal)
+* Seafile
+    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
+* SFTP
+    * Always convert the checksum to lower case (buengese)
+* Union
+    * Create root directories if none exist (Nick Craig-Wood)
+
+## v1.53.1 - 2020-09-13
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
+
+* Bug Fixes
+    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
+    * check
+        * Add back missing --download flag (Nick Craig-Wood)
+        * Fix docs (Nick Craig-Wood)
+    * docs
+        * Note --log-file does append (Nick Craig-Wood)
+        * Add full stops for consistency in rclone --help (edwardxml)
+        * Add Tencent COS to s3 provider list (wjielai)
+        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
+        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
+        * Fix formatting of rc docs page (Nick Craig-Wood)
+    * build
+        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
+        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
+        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
+* VFS
+    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
+    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
+* Local
+    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
+* Drive
+    * Re-adds special oauth help text (Tim Gallant)
+* Opendrive
+    * Do not retry 400 errors (Evan Harris)
+
 ## v1.53.0 - 2020-09-02

 [See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)
@@ -39,10 +39,10 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
 * [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
 * [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
-* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
+* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
 * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
-* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
+* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
+* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
 * [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
 * [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
 * [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
 * [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
 * [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
-* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
+* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
 * [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
 * [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
 * [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.
@@ -65,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
 * [rclone move](/commands/rclone_move/) - Move files from source to dest.
 * [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
 * [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
-* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file
+* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
 * [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
 * [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
 * [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.
@@ -29,7 +29,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.

-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -55,6 +55,7 @@ rclone check source:path dest:path [flags]
 ```
       --combined string   Make a combined report of changes to this file
       --differ string     Report all non-matching files to this file
+      --download          Check by downloading rather than with hash.
       --error string      Report all files with errors (hashing or reading) to this file
   -h, --help              help for check
       --match string      Report all matching files to this file
@@ -1,13 +1,13 @@
 ---
 title: "rclone cleanup"
-description: "Clean up the remote if possible"
+description: "Clean up the remote if possible."
 slug: rclone_cleanup
 url: /commands/rclone_cleanup/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
 ---
 # rclone cleanup

-Clean up the remote if possible
+Clean up the remote if possible.

 ## Synopsis

@@ -1,13 +1,13 @@
 ---
 title: "rclone copy"
-description: "Copy files from source to dest, skipping already copied"
+description: "Copy files from source to dest, skipping already copied."
 slug: rclone_copy
 url: /commands/rclone_copy/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
 ---
 # rclone copy

-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.

 ## Synopsis

@@ -1,13 +1,13 @@
 ---
 title: "rclone copyto"
-description: "Copy files from source to dest, skipping already copied"
+description: "Copy files from source to dest, skipping already copied."
 slug: rclone_copyto
 url: /commands/rclone_copyto/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
 ---
 # rclone copyto

-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.

 ## Synopsis

@@ -40,7 +40,7 @@ the source match the files in the destination, not the other way
 around. This means that extra files in the destination that are not in
 the source will not be detected.

-The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
 and `--error` flags write paths, one per line, to the file name (or
 stdout if it is `-`) supplied. What they write is described in the
 help below. For example `--differ` will write all paths which are
@@ -1,13 +1,13 @@
 ---
 title: "rclone lsf"
-description: "List directories and objects in remote:path formatted for parsing"
+description: "List directories and objects in remote:path formatted for parsing."
 slug: rclone_lsf
 url: /commands/rclone_lsf/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
 ---
 # rclone lsf

-List directories and objects in remote:path formatted for parsing
+List directories and objects in remote:path formatted for parsing.

 ## Synopsis

@@ -49,6 +49,9 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount

+**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.
+
 ## Installing on Windows

 To run rclone mount on Windows, you will need to
@@ -191,9 +194,6 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
-
 ## VFS - Virtual File System

 This command uses the VFS layer. This adapts the cloud storage objects
@@ -357,6 +357,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -1,13 +1,13 @@
 ---
 title: "rclone obscure"
-description: "Obscure password for use in the rclone config file"
+description: "Obscure password for use in the rclone config file."
 slug: rclone_obscure
 url: /commands/rclone_obscure/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
 ---
 # rclone obscure

-Obscure password for use in the rclone config file
+Obscure password for use in the rclone config file.

 ## Synopsis

@@ -196,6 +196,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for

@@ -195,6 +195,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for

@@ -267,6 +267,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for

@@ -206,6 +206,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for

@@ -275,6 +275,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ## VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -6,23 +6,26 @@ description: "Encryption overlay remote"
 {{< icon "fa fa-lock" >}}Crypt
 ----------------------------------------

-The `crypt` remote encrypts and decrypts another remote.
+Rclone `crypt` remotes encrypt and decrypt other remotes.

-To use it first set up the underlying remote following the config
-instructions for that remote. You can also use a local pathname
-instead of a remote which will encrypt and decrypt from that directory
-which might be useful for encrypting onto a USB stick for example.
+To use `crypt`, first set up the underlying remote. Follow the `rclone
+config` instructions for that remote.

-First check your chosen remote is working - we'll call it
-`remote:path` in these docs. Note that anything inside `remote:path`
-will be encrypted and anything outside won't. This means that if you
-are using a bucket based remote (eg S3, B2, swift) then you should
-probably put the bucket in the remote `s3:bucket`. If you just use
-`s3:` then rclone will make encrypted bucket names too (if using file
-name encryption) which may or may not be what you want.
+`crypt` applied to a local pathname instead of a remote will
+encrypt and decrypt that directory, and can be used to encrypt USB
+removable drives.

-Now configure `crypt` using `rclone config`. We will call this one
-`secret` to differentiate it from the `remote`.
+Before configuring the crypt remote, check the underlying remote is
+working. In this example the underlying remote is called `remote:path`.
+Anything inside `remote:path` will be encrypted and anything outside
+will not. In the case of an S3 based underlying remote (eg Amazon S3,
+B2, Swift) it is generally advisable to define a crypt remote in the
+underlying remote `s3:bucket`. If `s3:` alone is specified alongside
+file name encryption, rclone will encrypt the bucket name.
+
+Configure `crypt` using `rclone config`. In this example the `crypt`
+remote is called `secret`, to differentiate it from the underlying
+`remote`.

 ```
 No remotes found - make a new one
@@ -96,49 +99,42 @@ d) Delete this remote
 y/e/d> y
 ```

-**Important** The password is stored in the config file is lightly
-obscured so it isn't immediately obvious what it is. It is in no way
-secure unless you use config file encryption.
+**Important** The crypt password stored in `rclone.conf` is lightly
+obscured. That only protects it from cursory inspection. It is not
+secure unless encryption of `rclone.conf` is specified.

-A long passphrase is recommended, or you can use a random one.
+A long passphrase is recommended, or `rclone config` can generate a
+random one.

-The obscured password is created by using AES-CTR with a static key, with
-the salt stored verbatim at the beginning of the obscured password. This
-static key is shared by between all versions of rclone.
+The obscured password is created using AES-CTR with a static key. The
+salt is stored verbatim at the beginning of the obscured password. This
+static key is shared between all versions of rclone.

 If you reconfigure rclone with the same passwords/passphrases
 elsewhere it will be compatible, but the obscured version will be different
 due to the different salt.

-Note that rclone does not encrypt
+Rclone does not encrypt

 * file length - this can be calculated within 16 bytes
 * modification time - used for syncing

 ## Specifying the remote ##

-In normal use, make sure the remote has a `:` in. If you specify the
-remote without a `:` then rclone will use a local directory of that
-name. So if you use a remote of `/path/to/secret/files` then rclone
-will encrypt stuff to that directory. If you use a remote of `name`
-then rclone will put files in a directory called `name` in the current
-directory.
+In normal use, ensure the remote has a `:` in. If specified without,
+rclone uses a local directory of that name. For example if a remote
+`/path/to/secret/files` is specified, rclone encrypts content to that
+directory. If a remote `name` is specified, rclone targets a directory
+`name` in the current directory.

-If you specify the remote as `remote:path/to/dir` then rclone will
-store encrypted files in `path/to/dir` on the remote. If you are using
-file name encryption, then when you save files to
-`secret:subdir/subfile` this will store them in the unencrypted path
-`path/to/dir` but the `subdir/subpath` bit will be encrypted.
+If remote `remote:path/to/dir` is specified, rclone stores encrypted
+files in `path/to/dir` on the remote. With file name encryption, files
+saved to `secret:subdir/subfile` are stored in the unencrypted path
+`path/to/dir` but the `subdir/subpath` element is encrypted.

-Note that unless you want encrypted bucket names (which are difficult
-to manage because you won't know what directory they represent in web
-interfaces etc), you should probably specify a bucket, eg
-`remote:secretbucket` when using bucket based remotes such as S3,
-Swift, Hubic, B2, GCS.
-
 ## Example ##

-To test I made a little directory of files using "standard" file name
+Create the following file structure using "standard" file name
 encryption.

 ```
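The obscuring scheme described above is simple to sketch: AES-CTR under a static key, with a fresh random IV (the "salt") written at the front of the output. A minimal, hedged illustration - the key below is a placeholder, not rclone's actual shared key, so the output is not compatible with `rclone obscure`:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Placeholder 32-byte key; rclone ships its own static key, shared by
// all rclone versions so any copy can de-obscure the config file.
var staticKey = []byte("0123456789abcdef0123456789abcdef")

func obscure(password string) (string, error) {
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize]
	if _, err := rand.Read(iv); err != nil {
		return "", err // the salt must be crypto-random
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	out, _ := obscure("correct horse battery staple")
	fmt.Println(out) // differs on every run because of the random salt
}
```

Because the IV is random, re-obscuring the same passphrase gives a different string each time, which is why identical passwords configured on two machines are compatible but do not look alike in the config file.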
@@ -152,7 +148,7 @@ plaintext/
 └── file4.txt
 ```

-Copy these to the remote and list them back
+Copy these to the remote, and list them

 ```
 $ rclone -q copy plaintext secret:
@@ -164,7 +160,7 @@ $ rclone -q ls secret:
         9 subdir/file3.txt
 ```

-Now see what that looked like when encrypted
+The crypt remote looks like

 ```
 $ rclone -q ls remote:path
@@ -175,7 +171,7 @@ $ rclone -q ls remote:path
        56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
 ```

-Note that this retains the directory structure which means you can do this
+The directory structure is preserved

 ```
 $ rclone -q ls secret:subdir
@@ -184,9 +180,9 @@ $ rclone -q ls secret:subdir
        10 subsubdir/file4.txt
 ```

-If don't use file name encryption then the remote will look like this
-- note the `.bin` extensions added to prevent the cloud provider
-attempting to interpret the data.
+Without file name encryption `.bin` extensions are added to underlying
+names. This prevents the cloud provider attempting to interpret file
+content.

 ```
 $ rclone -q ls remote:path
@@ -199,8 +195,6 @@ $ rclone -q ls remote:path

 ### File name encryption modes ###

-Here are some of the features of the file name encryption modes
-
 Off

 * doesn't hide file names or directory structure
@@ -219,17 +213,19 @@ Standard
 Obfuscation

 This is a simple "rotate" of the filename, with each file having a rot
-distance based on the filename. We store the distance at the beginning
-of the filename. So a file called "hello" may become "53.jgnnq".
+distance based on the filename. Rclone stores the distance at the
+beginning of the filename. A file called "hello" may become "53.jgnnq".

-This is not a strong encryption of filenames, but it may stop automated
-scanning tools from picking up on filename patterns. As such it's an
-intermediate between "off" and "standard". The advantage is that it
-allows for longer path segment names.
+Obfuscation is not a strong encryption of filenames, but hinders
+automated scanning tools picking up on filename patterns. It is an
+intermediate between "off" and "standard" which allows for longer path
+segment names.

 There is a possibility with some unicode based filenames that the
 obfuscation is weak and may map lower case characters to upper case
-equivalents. You can not rely on this for strong protection.
+equivalents.
+
+Obfuscation cannot be relied upon for strong protection.

 * file names very lightly obfuscated
 * file names can be longer than standard encryption
@@ -237,13 +233,14 @@ equivalents. You can not rely on this for strong protection.
 * directory structure visible
 * identical files names will have identical uploaded names

-Cloud storage systems have various limits on file name length and
-total path length which you are more likely to hit using "Standard"
-file name encryption. If you keep your file names to below 156
-characters in length then you should be OK on all providers.
+Cloud storage systems have limits on file name length and
+total path length which rclone is more likely to breach using
+"Standard" file name encryption. Where file names are less than 156
+characters in length issues should not be encountered, irrespective of
+cloud storage provider.

-There may be an even more secure file name encryption mode in the
-future which will address the long file name problem.
+An alternative, future rclone file name encryption mode may tolerate
+backend provider path length limits.

 ### Directory name encryption ###
 Crypt offers the option of encrypting dir names or leaving them intact.
@@ -269,10 +266,10 @@ Example:
 Crypt stores modification times using the underlying remote so support
 depends on that.

 Hashes are not stored for crypt. However the data integrity is
 protected by an extremely strong crypto authenticator.

-Note that you should use the `rclone cryptcheck` command to check the
+Use the `rclone cryptcheck` command to check the
 integrity of a crypted remote instead of `rclone check` which can't
 check the checksums properly.
@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
 combination with the `-v` flag. See the [Logging section](#logging)
 for more info.

+If FILE exists then rclone will append to it.
+
 Note that if you are using the `logrotate` program to manage rclone's
 logs, then you should use the `copytruncate` option as rclone doesn't
 have a signal to rotate logs.
@@ -1251,11 +1253,17 @@ or with `--backup-dir`. See `--backup-dir` for more info.

 For example

-    rclone sync -i /path/to/local/file remote:current --suffix .bak
+    rclone copy -i /path/to/local/file remote:current --suffix .bak

-will sync `/path/to/local` to `remote:current`, but for any files
+will copy `/path/to/local` to `remote:current`, but for any files
 which would have been updated or deleted have .bak added.

+If using `rclone sync` with `--suffix` and without `--backup-dir` then
+it is recommended to put a filter rule in excluding the suffix
+otherwise the `sync` will delete the backup files.
+
+    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
+
 ### --suffix-keep-extension ###

 When using `--suffix`, setting this causes rclone put the SUFFIX
@@ -547,8 +547,10 @@ Here are the standard options specific to drive (Google Drive).

 #### --drive-client-id

-OAuth Client Id
-Leave blank normally.
+Google Application Client Id
+Setting your own is recommended.
+See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
+If you leave this blank, it will use an internal key which is low performance.

 - Config: client_id
 - Env Var: RCLONE_DRIVE_CLIENT_ID
@@ -147,7 +147,7 @@ These flags are available for every command.
       --use-json-log                 Use json log format.
       --use-mmap                     Use mmap allocator (see docs).
       --use-server-modtime           Use server modified time instead of object metadata
-      --user-agent string            Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.0")
+      --user-agent string            Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.3")
   -v, --verbose count                Print lots more stuff (repeat for more)
 ```

@@ -246,7 +246,7 @@ and may be set in the config file.
       --drive-auth-owner-only        Only consider files owned by the authenticated user.
       --drive-auth-url string        Auth server URL.
       --drive-chunk-size SizeSuffix  Upload chunk size. Must a power of 2 >= 256k. (default 8M)
-      --drive-client-id string       OAuth Client Id
+      --drive-client-id string       Google Application Client Id
       --drive-client-secret string   OAuth Client Secret
       --drive-disable-http2          Disable drive using http2 (default true)
       --drive-encoding MultiEncoder  This sets the encoding for the backend. (default InvalidUtf8)
@@ -148,8 +148,13 @@ flag.
 Note that Jottacloud requires the MD5 hash before upload so if the
 source does not have an MD5 checksum then the file will be cached
 temporarily on disk (wherever the `TMPDIR` environment variable points
 to) before it is uploaded. Small files will be cached in memory - see
 the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
+When uploading from local disk the source checksum is always available,
+so this does not apply. Starting with rclone version 1.52 the same is
+true for crypted remotes (in older versions the crypt backend would not
+calculate hashes for uploads from local disk, so the Jottacloud
+backend had to do it as described above).

 #### Restricted filename characters
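The caching behaviour described above amounts to "hash before upload": buffer small payloads in memory and spool larger ones to a temp file so the MD5 is known before the transfer starts. A rough sketch of that strategy, with an illustrative threshold standing in for `--jottacloud-md5-memory-limit`:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

const memoryLimit = 10 << 20 // 10 MiB, illustrative only

// hashAndReopen returns the MD5 of src plus a reader positioned at the
// start of the same data, ready to be uploaded. cleanup removes any
// temp file that was needed.
func hashAndReopen(src io.Reader) (sum string, body io.Reader, cleanup func(), err error) {
	hasher := md5.New()
	cleanup = func() {}

	// Try to keep the payload in memory first.
	var buf bytes.Buffer
	n, err := io.CopyN(io.MultiWriter(hasher, &buf), src, memoryLimit+1)
	if err != nil && err != io.EOF {
		return "", nil, cleanup, err
	}
	if n <= memoryLimit {
		return hex.EncodeToString(hasher.Sum(nil)), &buf, cleanup, nil
	}

	// Too big: spool what we read plus the rest to disk (honours TMPDIR).
	tmp, err := os.CreateTemp("", "upload-")
	if err != nil {
		return "", nil, cleanup, err
	}
	cleanup = func() { tmp.Close(); os.Remove(tmp.Name()) }
	if _, err := tmp.Write(buf.Bytes()); err != nil {
		return "", nil, cleanup, err
	}
	if _, err := io.Copy(io.MultiWriter(hasher, tmp), src); err != nil {
		return "", nil, cleanup, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return "", nil, cleanup, err
	}
	return hex.EncodeToString(hasher.Sum(nil)), tmp, cleanup, nil
}

func main() {
	sum, body, cleanup, err := hashAndReopen(bytes.NewReader([]byte("hello")))
	defer cleanup()
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(body)
	fmt.Println(sum, string(b))
}
```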
@@ -22,7 +22,7 @@ Here is an overview of the major features of each cloud storage system.
 | Backblaze B2 | SHA1 | Yes | No | No | R/W |
 | Box | SHA1 | Yes | Yes | No | - |
 | Citrix ShareFile | MD5 | Yes | Yes | No | - |
-| Dropbox | DBHASH † | Yes | Yes | No | - |
+| Dropbox | DBHASH ¹ | Yes | Yes | No | - |
 | FTP | - | No | No | No | - |
 | Google Cloud Storage | MD5 | Yes | No | No | R/W |
 | Google Drive | MD5 | Yes | No | Yes | R/W |
@@ -31,25 +31,52 @@ Here is an overview of the major features of each cloud storage system.
 | Hubic | MD5 | Yes | No | No | R/W |
 | Jottacloud | MD5 | Yes | Yes | No | R/W |
 | Koofr | MD5 | No | Yes | No | - |
-| Mail.ru Cloud | Mailru ‡‡‡ | Yes | Yes | No | - |
+| Mail.ru Cloud | Mailru ⁶ | Yes | Yes | No | - |
 | Mega | - | No | No | Yes | - |
 | Memory | MD5 | Yes | No | No | - |
 | Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
-| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
-| OpenDrive | MD5 | Yes | Yes | Partial \* | - |
+| Microsoft OneDrive | SHA1 ⁵ | Yes | Yes | No | R |
+| OpenDrive | MD5 | Yes | Yes | Partial ⁸ | - |
 | OpenStack Swift | MD5 | Yes | No | No | R/W |
-| pCloud | MD5, SHA1 | Yes | No | No | W |
+| pCloud | MD5, SHA1 ⁷ | Yes | No | No | W |
 | premiumize.me | - | No | Yes | No | R |
 | put.io | CRC-32 | Yes | No | Yes | R |
 | QingStor | MD5 | No | No | No | R/W |
 | Seafile | - | No | No | No | - |
-| SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - |
+| SFTP | MD5, SHA1 ² | Yes | Depends | No | - |
 | SugarSync | - | No | No | No | - |
 | Tardigrade | - | Yes | No | No | - |
-| WebDAV | MD5, SHA1 †† | Yes ††† | Depends | No | - |
+| WebDAV | MD5, SHA1 ³ | Yes ⁴ | Depends | No | - |
 | Yandex Disk | MD5 | Yes | No | No | R/W |
 | The local filesystem | All | Yes | Depends | No | - |

+### Notes
+
+¹ Dropbox supports [its own custom
+hash](https://www.dropbox.com/developers/reference/content-hash).
+This is an SHA256 sum of all the 4MB block SHA256s.
+
+² SFTP supports checksums if the same login has shell access and
+`md5sum` or `sha1sum` as well as `echo` are in the remote's PATH.
+
+³ WebDAV supports hashes when used with Owncloud and Nextcloud only.
+
+⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
+
+⁵ Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
+for business and SharePoint server support Microsoft's own
+[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
+
+⁶ Mail.ru uses its own modified SHA1 hash
+
+⁷ pCloud only supports SHA1 (not MD5) in its EU region
+
+⁸ Opendrive does not support creation of duplicate files using
+their web client interface or other stock clients, but the underlying
+storage platform has been determined to allow duplicate files, and it
+is possible to create them with `rclone`. It may be that this is a
+mistake or an unsupported feature.
+
 ### Hash ###

 The cloud storage system supports various hash types of the objects.
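Note ¹'s Dropbox content hash is straightforward to compute locally, assuming the published algorithm (SHA-256 of each 4 MiB block, then SHA-256 over the concatenated block digests):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

const blockSize = 4 * 1024 * 1024 // Dropbox hashes in 4 MiB blocks

func dropboxContentHash(r io.Reader) (string, error) {
	overall := sha256.New()
	for {
		block := sha256.New()
		n, err := io.CopyN(block, r, blockSize)
		if n > 0 {
			overall.Write(block.Sum(nil)) // digest of this block
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(overall.Sum(nil)), nil
}

func main() {
	sum, _ := dropboxContentHash(strings.NewReader("hello"))
	fmt.Println(sum)
}
```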
@@ -60,23 +87,6 @@ the `check` command.
 To use the verify checksums when transferring between cloud storage
 systems they must support a common hash type.

-† Note that Dropbox supports [its own custom
-hash](https://www.dropbox.com/developers/reference/content-hash).
-This is an SHA256 sum of all the 4MB block SHA256s.
-
-‡ SFTP supports checksums if the same login has shell access and `md5sum`
-or `sha1sum` as well as `echo` are in the remote's PATH.
-
-†† WebDAV supports hashes when used with Owncloud and Nextcloud only.
-
-††† WebDAV supports modtimes when used with Owncloud and Nextcloud only.
-
-‡‡ Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
-for business and SharePoint server support Microsoft's own
-[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
-
-‡‡‡ Mail.ru uses its own modified SHA1 hash
-
 ### ModTime ###

 The cloud storage system supports setting modification times on
@@ -117,12 +127,6 @@ objects with the same name.
 This confuses rclone greatly when syncing - use the `rclone dedupe`
 command to rename or remove duplicates.

-\* Opendrive does not support creation of duplicate files using
-their web client interface or other stock clients, but the underlying
-storage platform has been determined to allow duplicate files, and it
-is possible to create them with `rclone`. It may be that this is a
-mistake or an unsupported feature.
-
 ### Restricted filenames ###

 Some cloud storage systems might have restrictions on the characters
@@ -90,8 +90,11 @@ second. These will be used to detect whether objects need syncing or
 not. In order to set a Modification time pCloud requires the object
 be re-uploaded.

-pCloud supports MD5 and SHA1 type hashes, so you can use the
-`--checksum` flag.
+pCloud supports MD5 and SHA1 type hashes in the US region, and SHA1
+only in the EU region, so you can use the `--checksum` flag.
+
+(Note that pCloud also support SHA256 in the EU region, but rclone
+does not have support for that yet.)

 #### Restricted filename characters
@@ -537,6 +537,8 @@ OR
 	"result": "<Raw command line output>"
 }

+```
+
 **Authentication is required for this call.**

 ### core/gc: Runs a garbage collection. {#core-gc}
@@ -1212,7 +1214,7 @@ This allows you to remove a plugin using it's name

 This takes parameters

-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`

 Eg

@@ -1226,7 +1228,7 @@ This allows you to remove a plugin using it's name

 This takes the following parameters

-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`

 Eg

@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
  {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
  {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
  {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
+ {{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
  {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
  {{< /provider_list >}}

@@ -138,7 +139,7 @@ Choose a number from below, or type in your own value
     / Asia Pacific (Mumbai)
  13 | Needs location constraint ap-south-1.
     \ "ap-south-1"
-    / Asia Patific (Hong Kong) Region
+    / Asia Pacific (Hong Kong) Region
  14 | Needs location constraint ap-east-1.
     \ "ap-east-1"
     / South America (Sao Paulo) Region

@@ -455,7 +456,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
  {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
  ### Standard Options

- Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
+ Here are the standard options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).

  #### --s3-provider

@@ -486,6 +487,8 @@ Choose your S3 provider.
      - Scaleway Object Storage
  - "StackPath"
      - StackPath Object Storage
+ - "TencentCOS"
+     - Tencent Cloud Object Storage (COS)
  - "Wasabi"
      - Wasabi Object Storage
  - "Other"

@@ -542,12 +545,12 @@ Region to connect to.
  - "us-east-2"
      - US East (Ohio) Region
      - Needs location constraint us-east-2.
- - "us-west-2"
-     - US West (Oregon) Region
-     - Needs location constraint us-west-2.
  - "us-west-1"
      - US West (Northern California) Region
      - Needs location constraint us-west-1.
+ - "us-west-2"
+     - US West (Oregon) Region
+     - Needs location constraint us-west-2.
  - "ca-central-1"
      - Canada (Central) Region
      - Needs location constraint ca-central-1.

@@ -557,9 +560,15 @@ Region to connect to.
  - "eu-west-2"
      - EU (London) Region
      - Needs location constraint eu-west-2.
+ - "eu-west-3"
+     - EU (Paris) Region
+     - Needs location constraint eu-west-3.
  - "eu-north-1"
      - EU (Stockholm) Region
      - Needs location constraint eu-north-1.
+ - "eu-south-1"
+     - EU (Milan) Region
+     - Needs location constraint eu-south-1.
  - "eu-central-1"
      - EU (Frankfurt) Region
      - Needs location constraint eu-central-1.

@@ -575,15 +584,36 @@ Region to connect to.
  - "ap-northeast-2"
      - Asia Pacific (Seoul)
      - Needs location constraint ap-northeast-2.
+ - "ap-northeast-3"
+     - Asia Pacific (Osaka-Local)
+     - Needs location constraint ap-northeast-3.
  - "ap-south-1"
      - Asia Pacific (Mumbai)
      - Needs location constraint ap-south-1.
  - "ap-east-1"
-     - Asia Patific (Hong Kong) Region
+     - Asia Pacific (Hong Kong) Region
      - Needs location constraint ap-east-1.
  - "sa-east-1"
      - South America (Sao Paulo) Region
      - Needs location constraint sa-east-1.
+ - "me-south-1"
+     - Middle East (Bahrain) Region
+     - Needs location constraint me-south-1.
+ - "af-south-1"
+     - Africa (Cape Town) Region
+     - Needs location constraint af-south-1.
+ - "cn-north-1"
+     - China (Beijing) Region
+     - Needs location constraint cn-north-1.
+ - "cn-northwest-1"
+     - China (Ningxia) Region
+     - Needs location constraint cn-northwest-1.
+ - "us-gov-east-1"
+     - AWS GovCloud (US-East) Region
+     - Needs location constraint us-gov-east-1.
+ - "us-gov-west-1"
+     - AWS GovCloud (US) Region
+     - Needs location constraint us-gov-west-1.

  #### --s3-region

@@ -839,6 +869,54 @@ Endpoint for StackPath Object Storage.

+ #### --s3-endpoint
+
+ Endpoint for Tencent COS API.
+
+ - Config: endpoint
+ - Env Var: RCLONE_S3_ENDPOINT
+ - Type: string
+ - Default: ""
+ - Examples:
+     - "cos.ap-beijing.myqcloud.com"
+         - Beijing Region.
+     - "cos.ap-nanjing.myqcloud.com"
+         - Nanjing Region.
+     - "cos.ap-shanghai.myqcloud.com"
+         - Shanghai Region.
+     - "cos.ap-guangzhou.myqcloud.com"
+         - Guangzhou Region.
+     - "cos.ap-nanjing.myqcloud.com"
+         - Nanjing Region.
+     - "cos.ap-chengdu.myqcloud.com"
+         - Chengdu Region.
+     - "cos.ap-chongqing.myqcloud.com"
+         - Chongqing Region.
+     - "cos.ap-hongkong.myqcloud.com"
+         - Hong Kong (China) Region.
+     - "cos.ap-singapore.myqcloud.com"
+         - Singapore Region.
+     - "cos.ap-mumbai.myqcloud.com"
+         - Mumbai Region.
+     - "cos.ap-seoul.myqcloud.com"
+         - Seoul Region.
+     - "cos.ap-bangkok.myqcloud.com"
+         - Bangkok Region.
+     - "cos.ap-tokyo.myqcloud.com"
+         - Tokyo Region.
+     - "cos.na-siliconvalley.myqcloud.com"
+         - Silicon Valley Region.
+     - "cos.na-ashburn.myqcloud.com"
+         - Virginia Region.
+     - "cos.na-toronto.myqcloud.com"
+         - Toronto Region.
+     - "cos.eu-frankfurt.myqcloud.com"
+         - Frankfurt Region.
+     - "cos.eu-moscow.myqcloud.com"
+         - Moscow Region.
+     - "cos.accelerate.myqcloud.com"
+         - Use Tencent COS Accelerate Endpoint.
+
+ #### --s3-endpoint
+
  Endpoint for S3 API.
  Required when using an S3 clone.

@@ -876,18 +954,22 @@ Used when creating buckets only.
      - Empty for US Region, Northern Virginia or Pacific Northwest.
  - "us-east-2"
      - US East (Ohio) Region.
- - "us-west-2"
-     - US West (Oregon) Region.
  - "us-west-1"
      - US West (Northern California) Region.
+ - "us-west-2"
+     - US West (Oregon) Region.
  - "ca-central-1"
      - Canada (Central) Region.
  - "eu-west-1"
      - EU (Ireland) Region.
  - "eu-west-2"
      - EU (London) Region.
+ - "eu-west-3"
+     - EU (Paris) Region.
  - "eu-north-1"
      - EU (Stockholm) Region.
+ - "eu-south-1"
+     - EU (Milan) Region.
  - "EU"
      - EU Region.
  - "ap-southeast-1"

@@ -897,13 +979,27 @@ Used when creating buckets only.
  - "ap-northeast-1"
      - Asia Pacific (Tokyo) Region.
  - "ap-northeast-2"
-     - Asia Pacific (Seoul)
+     - Asia Pacific (Seoul) Region.
+ - "ap-northeast-3"
+     - Asia Pacific (Osaka-Local) Region.
  - "ap-south-1"
-     - Asia Pacific (Mumbai)
+     - Asia Pacific (Mumbai) Region.
  - "ap-east-1"
-     - Asia Pacific (Hong Kong)
+     - Asia Pacific (Hong Kong) Region.
  - "sa-east-1"
      - South America (Sao Paulo) Region.
+ - "me-south-1"
+     - Middle East (Bahrain) Region.
+ - "af-south-1"
+     - Africa (Cape Town) Region.
+ - "cn-north-1"
+     - China (Beijing) Region
+ - "cn-northwest-1"
+     - China (Ningxia) Region.
+ - "us-gov-east-1"
+     - AWS GovCloud (US-East) Region.
+ - "us-gov-west-1"
+     - AWS GovCloud (US) Region.

  #### --s3-location-constraint

@@ -1006,6 +1102,8 @@ doesn't copy the ACL from the source but rather writes a fresh one.
  - Type: string
  - Default: ""
  - Examples:
+     - "default"
+         - Owner gets Full_CONTROL. No one else has access rights (default).
      - "private"
          - Owner gets FULL_CONTROL. No one else has access rights (default).
      - "public-read"

@@ -1106,6 +1204,24 @@ The storage class to use when storing new objects in OSS.

  #### --s3-storage-class

+ The storage class to use when storing new objects in Tencent COS.
+
+ - Config: storage_class
+ - Env Var: RCLONE_S3_STORAGE_CLASS
+ - Type: string
+ - Default: ""
+ - Examples:
+     - ""
+         - Default
+     - "STANDARD"
+         - Standard storage class
+     - "ARCHIVE"
+         - Archive storage mode.
+     - "STANDARD_IA"
+         - Infrequent access storage mode.
+
+ #### --s3-storage-class
+
  The storage class to use when storing new objects in S3.

  - Config: storage_class

@@ -1122,7 +1238,7 @@ The storage class to use when storing new objects in S3.

  ### Advanced Options

- Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
+ Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).

  #### --s3-bucket-acl

@@ -1343,7 +1459,7 @@ if false then rclone will use virtual path style. See [the AWS S3
  docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
  for more info.

- Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+ Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
  false - rclone will do this automatically based on the provider
  setting.

@@ -2212,6 +2328,138 @@ d) Delete this remote
  y/e/d> y
  ```

+ ### Tencent COS {#tencent-cos}
+
+ [Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, and low-cost, with high capacity and low latency.
+
+ To configure access to Tencent COS, follow the steps below:
+
+ 1. Run `rclone config` and select `n` for a new remote.
+
+ ```
+ rclone config
+ No remotes found - make a new one
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ ```
+
+ 2. Give the configuration a name, for example 'cos'.
+
+ ```
+ name> cos
+ ```
+
+ 3. Select `s3` storage.
+
+ ```
+ Choose a number from below, or type in your own value
+ 1 / 1Fichier
+    \ "fichier"
+ 2 / Alias for an existing remote
+    \ "alias"
+ 3 / Amazon Drive
+    \ "amazon cloud drive"
+ 4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
+    \ "s3"
+ [snip]
+ Storage> s3
+ ```
+
+ 4. Select `TencentCOS` provider.
+
+ ```
+ Choose a number from below, or type in your own value
+ 1 / Amazon Web Services (AWS) S3
+    \ "AWS"
+ [snip]
+ 11 / Tencent Cloud Object Storage (COS)
+    \ "TencentCOS"
+ [snip]
+ provider> TencentCOS
+ ```
+
+ 5. Enter your Tencent Cloud SecretId and SecretKey.
+
+ ```
+ Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+ Only applies if access_key_id and secret_access_key is blank.
+ Enter a boolean value (true or false). Press Enter for the default ("false").
+ Choose a number from below, or type in your own value
+ 1 / Enter AWS credentials in the next step
+    \ "false"
+ 2 / Get AWS credentials from the environment (env vars or IAM)
+    \ "true"
+ env_auth> 1
+ AWS Access Key ID.
+ Leave blank for anonymous access or runtime credentials.
+ Enter a string value. Press Enter for the default ("").
+ access_key_id> AKIDxxxxxxxxxx
+ AWS Secret Access Key (password)
+ Leave blank for anonymous access or runtime credentials.
+ Enter a string value. Press Enter for the default ("").
+ secret_access_key> xxxxxxxxxxx
+ ```
+
+ 6. Select the endpoint for Tencent COS. This is the standard endpoint for each region.
+
+ ```
+ 1 / Beijing Region.
+    \ "cos.ap-beijing.myqcloud.com"
+ 2 / Nanjing Region.
+    \ "cos.ap-nanjing.myqcloud.com"
+ 3 / Shanghai Region.
+    \ "cos.ap-shanghai.myqcloud.com"
+ 4 / Guangzhou Region.
+    \ "cos.ap-guangzhou.myqcloud.com"
+ [snip]
+ endpoint> 4
+ ```
+
+ 7. Choose the ACL and storage class.
+
+ ```
+ Note that this ACL is applied when server side copying objects as S3
+ doesn't copy the ACL from the source but rather writes a fresh one.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ 1 / Owner gets Full_CONTROL. No one else has access rights (default).
+    \ "default"
+ [snip]
+ acl> 1
+ The storage class to use when storing new objects in Tencent COS.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ 1 / Default
+    \ ""
+ [snip]
+ storage_class> 1
+ Edit advanced config? (y/n)
+ y) Yes
+ n) No (default)
+ y/n> n
+ Remote config
+ --------------------
+ [cos]
+ type = s3
+ provider = TencentCOS
+ env_auth = false
+ access_key_id = xxx
+ secret_access_key = xxx
+ endpoint = cos.ap-guangzhou.myqcloud.com
+ acl = default
+ --------------------
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+ Current remotes:
+
+ Name Type
+ ==== ====
+ cos  s3
+ ```

  ### Netease NOS ###

  For Netease NOS configure as per the configurator `rclone config`

@@ -1 +1 @@
- v1.53.0
+ v1.53.4

@@ -272,7 +272,7 @@ func (s *StatsInfo) String() string {
  		}
  	}

- 	_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
+ 	_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
  		dateString,
  		fs.SizeSuffix(s.bytes),
  		fs.SizeSuffix(totalSize).Unit("Bytes"),

@@ -283,6 +283,7 @@ func (s *StatsInfo) String() string {
  	)

  	if !fs.Config.StatsOneLine {
+ 		_, _ = buf.WriteRune('\n')
  		errorDetails := ""
  		switch {
  		case s.fatalError:

@@ -291,6 +292,7 @@ func (s *StatsInfo) String() string {
  			errorDetails = " (retrying may help)"
  		case s.errors != 0:
  			errorDetails = " (no need to retry)"

  		}

  		// Add only non zero stats

@@ -335,6 +337,8 @@ func (s *StatsInfo) String() string {
  // Transferred returns list of all completed transfers including checked and
  // failed ones.
  func (s *StatsInfo) Transferred() []TransferSnapshot {
+ 	s.mu.RLock()
+ 	defer s.mu.RUnlock()
  	ts := make([]TransferSnapshot, 0, len(s.startedTransfers))

  	for _, tr := range s.startedTransfers {

@@ -366,6 +366,8 @@ func (sg *statsGroups) sum() *StatsInfo {
  				sum.lastError = stats.lastError
  			}
  			sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
+ 			sum.oldDuration += stats.oldDuration
+ 			sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
  		}
  		stats.mu.RUnlock()
  	}

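The `Transferred()` hunk above takes the stats read lock for the whole copy of `startedTransfers`. A minimal, self-contained sketch of that pattern follows; the type here is hypothetical, not rclone's actual one:

```go
package main

import (
	"fmt"
	"sync"
)

// stats is a stand-in for a structure that other goroutines append to.
type stats struct {
	mu        sync.RWMutex
	transfers []string
}

// snapshot holds the read lock for the entire copy, so the slice header
// and its backing array cannot change mid-copy.
func (s *stats) snapshot() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]string, 0, len(s.transfers))
	return append(out, s.transfers...)
}

func main() {
	s := &stats{transfers: []string{"a.txt", "b.txt"}}
	fmt.Println(s.snapshot()) // [a.txt b.txt], safe under concurrent appends
}
```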
@@ -4,8 +4,10 @@ import (
  	"fmt"
  	"runtime"
  	"testing"
+ 	"time"

  	"github.com/rclone/rclone/fstest/testy"
+ 	"github.com/stretchr/testify/assert"
  )

  func TestStatsGroupOperations(t *testing.T) {

@@ -43,17 +45,26 @@ func TestStatsGroupOperations(t *testing.T) {
  		t.Parallel()
  		stats1 := NewStats()
  		stats1.bytes = 5
- 		stats1.errors = 5
+ 		stats1.errors = 6
+ 		stats1.oldDuration = time.Second
+ 		stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
  		stats2 := NewStats()
+ 		stats2.bytes = 10
+ 		stats2.errors = 12
+ 		stats2.oldDuration = 2 * time.Second
+ 		stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
  		sg := newStatsGroups()
  		sg.set("test1", stats1)
  		sg.set("test2", stats2)
  		sum := sg.sum()
- 		if sum.bytes != stats1.bytes+stats2.bytes {
- 			t.Fatalf("sum() => bytes %d, expected %d", sum.bytes, stats1.bytes+stats2.bytes)
- 		}
- 		if sum.errors != stats1.errors+stats2.errors {
- 			t.Fatalf("sum() => errors %d, expected %d", sum.errors, stats1.errors+stats2.errors)
+ 		assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
+ 		assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
+ 		assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
+ 		// dict can iterate in either order
+ 		a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
+ 		b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
+ 		if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
+ 			assert.Equal(t, b, sum.oldTimeRanges)
  		}
  	})

@@ -72,8 +72,16 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
  	for _, tr := range tm.items {
  		s = append(s, tr)
  	}
+ 	// sort by time first and if equal by name. Note that the relatively
+ 	// low time resolution on Windows can cause equal times.
  	sort.Slice(s, func(i, j int) bool {
- 		return s[i].startedAt.Before(s[j].startedAt)
+ 		a, b := s[i], s[j]
+ 		if a.startedAt.Before(b.startedAt) {
+ 			return true
+ 		} else if !a.startedAt.Equal(b.startedAt) {
+ 			return false
+ 		}
+ 		return a.remote < b.remote
  	})
  	return s
  }

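The new comparator sorts by start time and falls back to the name when two transfers start in the same clock tick (which the comment notes is common on Windows, where the timer is coarse). A runnable sketch of the same two-key ordering with illustrative types:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// job stands in for a transfer with a start time and a name.
type job struct {
	started time.Time
	name    string
}

func main() {
	t0 := time.Now().Truncate(time.Second) // coarse timestamps collide
	jobs := []job{
		{t0, "b.txt"},
		{t0, "a.txt"},
		{t0.Add(time.Second), "c.txt"},
	}
	// Order by time first, then break ties by name so the result is
	// deterministic even when timestamps are equal.
	sort.Slice(jobs, func(i, j int) bool {
		a, b := jobs[i], jobs[j]
		if !a.started.Equal(b.started) {
			return a.started.Before(b.started)
		}
		return a.name < b.name
	})
	for _, j := range jobs {
		fmt.Println(j.name) // a.txt, b.txt, c.txt - stable across runs
	}
}
```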
@@ -31,6 +31,7 @@ var (
  	noTransport  = new(sync.Once)
  	tpsBucket    *rate.Limiter // for limiting number of http transactions per second
  	cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ 	logMutex     sync.Mutex
  )

  // StartHTTPTokenBucket starts the token bucket if necessary

@@ -328,15 +329,18 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
  		if t.dump&fs.DumpAuth == 0 {
  			buf = cleanAuths(buf)
  		}
+ 		logMutex.Lock()
  		fs.Debugf(nil, "%s", separatorReq)
  		fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
  		fs.Debugf(nil, "%s", string(buf))
  		fs.Debugf(nil, "%s", separatorReq)
+ 		logMutex.Unlock()
  	}
  	// Do round trip
  	resp, err = t.Transport.RoundTrip(req)
  	// Logf response
  	if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+ 		logMutex.Lock()
  		fs.Debugf(nil, "%s", separatorResp)
  		fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
  		if err != nil {

@@ -346,6 +350,7 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
  			fs.Debugf(nil, "%s", string(buf))
  		}
  		fs.Debugf(nil, "%s", separatorResp)
+ 		logMutex.Unlock()
  	}
  	if err == nil {
  		checkServerTime(req, resp)

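The `logMutex` hunks serialize the multi-line request/response dumps so concurrent round trips cannot interleave their output. A minimal sketch of the problem and the fix, with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

var logMu sync.Mutex

// dumpBlock emits a multi-line block under one lock; without it, blocks
// written by concurrent goroutines would interleave line by line.
func dumpBlock(id int, lines []string) {
	logMu.Lock()
	defer logMu.Unlock()
	for _, l := range lines {
		fmt.Printf("goroutine %d: %s\n", id, l)
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			dumpBlock(id, []string{"--- request ---", "GET /", "--- end ---"})
		}(i)
	}
	wg.Wait()
}
```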
@@ -172,9 +172,12 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
  		return false
  	}
  	if ht == hash.None {
- 		checksumWarning.Do(func() {
- 			fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
- 		})
+ 		common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
+ 		if common.Count() == 0 {
+ 			checksumWarning.Do(func() {
+ 				fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
+ 			})
+ 		}
  		fs.Debugf(src, "Size of src and dst objects identical")
  	} else {
  		fs.Debugf(src, "Size and %v of src and dst objects identical", ht)

@@ -1522,12 +1525,11 @@ func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err
  			}
  		}
  	}
- 	} else {
+ 	} else if fs.Config.Suffix != "" {
- 		if srcFileName == "" {
- 			return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
- 		}
  		// --backup-dir is not set but --suffix is - use the destination as the backupDir
  		backupDir = fdst
+ 	} else {
+ 		return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
  	}
  	if !CanServerSideMove(backupDir) {
  		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))

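The `equal()` change only warns when the two remotes share no hash type at all, rather than whenever a single object pair happens to lack a hash. A hedged sketch of that overlap test, assuming a bitmask set representation similar in spirit to rclone's `hash.Set`:

```go
package main

import "fmt"

// hashSet is an assumed bitmask of supported hash types; the overlap of
// two sets is their bitwise AND, and a zero count means the remotes
// share no hash type, which is the only case worth warning about.
type hashSet uint

const (
	hashMD5 hashSet = 1 << iota
	hashSHA1
	hashCRC32
)

func (s hashSet) overlap(o hashSet) hashSet { return s & o }

func (s hashSet) count() int {
	n := 0
	for ; s != 0; s >>= 1 {
		n += int(s & 1)
	}
	return n
}

func main() {
	src, dst := hashMD5|hashSHA1, hashSHA1|hashCRC32
	if src.overlap(dst).count() == 0 {
		fmt.Println("no common hash - fall back to size-only")
	} else {
		fmt.Println("common hash available") // prints this: SHA1 is shared
	}
}
```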
@@ -37,9 +37,9 @@ func TestParseDuration(t *testing.T) {
  		{"1x", 0, true},
  		{"off", time.Duration(DurationOff), false},
  		{"1h2m3s", time.Hour + 2*time.Minute + 3*time.Second, false},
- 		{"2001-02-03", time.Since(time.Date(2001, 2, 3, 0, 0, 0, 0, time.Local)), false},
+ 		{"2001-02-03", time.Since(time.Date(2001, 2, 3, 0, 0, 0, 0, time.UTC)), false},
- 		{"2001-02-03 10:11:12", time.Since(time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local)), false},
+ 		{"2001-02-03 10:11:12", time.Since(time.Date(2001, 2, 3, 10, 11, 12, 0, time.UTC)), false},
- 		{"2001-02-03T10:11:12", time.Since(time.Date(2001, 2, 3, 10, 11, 12, 0, time.Local)), false},
+ 		{"2001-02-03T10:11:12", time.Since(time.Date(2001, 2, 3, 10, 11, 12, 0, time.UTC)), false},
  		{"2001-02-03T10:11:12.123Z", time.Since(time.Date(2001, 2, 3, 10, 11, 12, 123, time.UTC)), false},
  	} {
  		duration, err := ParseDuration(test.in)

@@ -353,17 +353,22 @@ func init() {
  - command - a string with the command name
  - arg - a list of arguments for the backend command
  - opt - a map of string to string of options
+ - returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")
+     - defaults to "COMBINED_OUTPUT" if not set
+     - the STREAM returnTypes will write the output to the body of the HTTP message
+     - the COMBINED_OUTPUT will write the output to the "result" parameter

  Returns

  - result - result from the backend command
+     - only set when using returnType "COMBINED_OUTPUT"
  - error - set if rclone exits with an error code
- - returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT". "STREAM_ONLY_STDERR")
+ - returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")

  For example

      rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
      rclone rc core/command -a ls -a mydrive:/ -o max-depth=1

  Returns

@@ -379,14 +384,13 @@ OR
  		"result": "<Raw command line output>"
  	}

+ ` + "```" + `
  `,
  	})
  }

  // rcRunCommand runs an rclone command with the given args and flags
  func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
  	command, err := in.GetString("command")
  	if err != nil {
  		command = ""

@@ -409,7 +413,7 @@ func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
  		returnType = "COMBINED_OUTPUT"
  	}

- 	var httpResponse *http.ResponseWriter
+ 	var httpResponse http.ResponseWriter
  	httpResponse, err = in.GetHTTPResponseWriter()
  	if err != nil {
  		return nil, errors.Errorf("response object is required\n" + err.Error())

@@ -460,12 +464,14 @@ func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
  			"error":  false,
  		}, nil
  	} else if returnType == "STREAM_ONLY_STDOUT" {
- 		cmd.Stdout = *httpResponse
+ 		cmd.Stdout = httpResponse
  	} else if returnType == "STREAM_ONLY_STDERR" {
- 		cmd.Stderr = *httpResponse
+ 		cmd.Stderr = httpResponse
  	} else if returnType == "STREAM" {
- 		cmd.Stdout = *httpResponse
- 		cmd.Stderr = *httpResponse
+ 		cmd.Stdout = httpResponse
+ 		cmd.Stderr = httpResponse
+ 	} else {
+ 		return nil, errors.Errorf("Unknown returnType %q", returnType)
  	}

  	err = cmd.Run()

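The `*http.ResponseWriter` to `http.ResponseWriter` fix works because the interface is already an `io.Writer`, so it can be wired straight into an `exec.Cmd` for the STREAM modes. A minimal sketch of that wiring (a hypothetical handler, not rclone's actual code):

```go
package main

import (
	"fmt"
	"net/http"
	"os/exec"
)

// handler streams a subprocess's combined output directly to the client.
func handler(w http.ResponseWriter, r *http.Request) {
	cmd := exec.Command("echo", "hello from the subprocess")
	cmd.Stdout = w // interface value, no pointer indirection needed
	cmd.Stderr = w
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(w, "command failed: %v\n", err)
	}
}

func main() {
	http.HandleFunc("/run", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```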
@@ -7,6 +7,7 @@ import (
  	"net/http/httptest"
  	"os"
  	"runtime"
+ 	"strings"
  	"testing"

  	"github.com/stretchr/testify/assert"

@@ -22,6 +23,12 @@ func TestMain(m *testing.M) {
  		fmt.Printf("rclone %s\n", fs.Version)
  		os.Exit(0)
  	}
+ 	// Pretend to error if we have an unknown command
+ 	if os.Args[len(os.Args)-1] == "unknown_command" {
+ 		fmt.Printf("rclone %s\n", fs.Version)
+ 		fmt.Fprintf(os.Stderr, "Unknown command\n")
+ 		os.Exit(1)
+ 	}
  	os.Exit(m.Run())
  }

@@ -136,17 +143,56 @@ func TestCoreQuit(t *testing.T) {
  func TestCoreCommand(t *testing.T) {
  	call := Calls.Get("core/command")

- 	var httpResponse http.ResponseWriter = httptest.NewRecorder()
+ 	test := func(command string, returnType string, wantOutput string, fail bool) {
+ 		var rec = httptest.NewRecorder()
+ 		var w http.ResponseWriter = rec

  		in := Params{
- 			"command":   "version",
+ 			"command":   command,
  			"opt":       map[string]string{},
  			"arg":       []string{},
- 			"_response": &httpResponse,
+ 			"_response": w,
+ 		}
+ 		if returnType != "" {
+ 			in["returnType"] = returnType
+ 		} else {
+ 			returnType = "COMBINED_OUTPUT"
+ 		}
+ 		stream := strings.HasPrefix(returnType, "STREAM")
+ 		got, err := call.Fn(context.Background(), in)
+ 		if stream && fail {
+ 			assert.Error(t, err)
+ 		} else {
+ 			assert.NoError(t, err)
+ 		}
+
+ 		if !stream {
+ 			assert.Equal(t, wantOutput, got["result"])
+ 			assert.Equal(t, fail, got["error"])
+ 		} else {
+ 			assert.Equal(t, wantOutput, rec.Body.String())
+ 		}
+ 		assert.Equal(t, http.StatusOK, rec.Result().StatusCode)
  	}
- 	got, err := call.Fn(context.Background(), in)
- 	require.NoError(t, err)

- 	assert.Equal(t, fmt.Sprintf("rclone %s\n", fs.Version), got["result"])
- 	assert.Equal(t, false, got["error"])
+ 	version := fmt.Sprintf("rclone %s\n", fs.Version)
+ 	errorString := "Unknown command\n"
+ 	t.Run("OK", func(t *testing.T) {
+ 		test("version", "", version, false)
+ 	})
+ 	t.Run("Fail", func(t *testing.T) {
+ 		test("unknown_command", "", version+errorString, true)
+ 	})
+ 	t.Run("Combined", func(t *testing.T) {
+ 		test("unknown_command", "COMBINED_OUTPUT", version+errorString, true)
+ 	})
+ 	t.Run("Stderr", func(t *testing.T) {
+ 		test("unknown_command", "STREAM_ONLY_STDERR", errorString, true)
+ 	})
+ 	t.Run("Stdout", func(t *testing.T) {
+ 		test("unknown_command", "STREAM_ONLY_STDOUT", version, true)
+ 	})
+ 	t.Run("Stream", func(t *testing.T) {
+ 		test("unknown_command", "STREAM", version+errorString, true)
+ 	})
  }

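These tests lean on a TestMain trick: the test binary inspects its own trailing argument and impersonates the rclone CLI, so `core/command` can exec `os.Args[0]` instead of needing a real rclone binary on PATH. A sketch of the pattern under that assumption (this code would live in a `_test.go` file):

```go
package clitest

import (
	"fmt"
	"os"
	"testing"
)

// TestMain lets the test binary stand in for the real CLI: depending on
// the last argument it either prints a fake version, fails like an
// unknown command, or runs the tests normally.
func TestMain(m *testing.M) {
	switch os.Args[len(os.Args)-1] {
	case "version":
		fmt.Println("fake-cli v0.0.0") // impersonate the real binary
		os.Exit(0)
	case "unknown_command":
		fmt.Fprintln(os.Stderr, "Unknown command")
		os.Exit(1)
	}
	os.Exit(m.Run()) // normal test run
}
```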
@@ -79,6 +79,15 @@ func Reshape(out interface{}, in interface{}) error {
  	return nil
  }

+ // Copy shallow copies the Params
+ func (p Params) Copy() (out Params) {
+ 	out = make(Params, len(p))
+ 	for k, v := range p {
+ 		out[k] = v
+ 	}
+ 	return out
+ }

  // Get gets a parameter from the input
  //
  // If the parameter isn't found then error will be of type

@@ -112,15 +121,15 @@ func (p Params) GetHTTPRequest() (*http.Request, error) {
  //
  // If the parameter isn't found then error will be of type
  // ErrParamNotFound and the returned value will be nil.
- func (p Params) GetHTTPResponseWriter() (*http.ResponseWriter, error) {
+ func (p Params) GetHTTPResponseWriter() (http.ResponseWriter, error) {
  	key := "_response"
  	value, err := p.Get(key)
  	if err != nil {
  		return nil, err
  	}
- 	request, ok := value.(*http.ResponseWriter)
+ 	request, ok := value.(http.ResponseWriter)
  	if !ok {
- 		return nil, ErrParamInvalid{errors.Errorf("expecting *http.ResponseWriter value for key %q (was %T)", key, value)}
+ 		return nil, ErrParamInvalid{errors.Errorf("expecting http.ResponseWriter value for key %q (was %T)", key, value)}
  	}
  	return request, nil
  }

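The signature change above removes a pointer-to-interface, a classic Go pitfall: an interface value already wraps a pointer to its concrete data, and a type assertion against `*SomeInterface` can never match a value stored as a concrete type. A small demonstration with an illustrative interface:

```go
package main

import (
	"fmt"
	"net/http/httptest"
)

// writer is an illustrative interface; *httptest.ResponseRecorder
// satisfies it via its WriteString method.
type writer interface{ WriteString(string) (int, error) }

func main() {
	rec := httptest.NewRecorder()
	var anyVal interface{} = rec // store the concrete value

	// Asserting to the interface works...
	if w, ok := anyVal.(writer); ok {
		_, _ = w.WriteString("ok\n")
	}
	// ...but asserting to *writer can never succeed for this value.
	if _, ok := anyVal.(*writer); !ok {
		fmt.Println("*writer assertion fails: pointer-to-interface is the wrong shape")
	}
}
```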
@@ -2,6 +2,8 @@ package rc

  import (
  	"fmt"
+ 	"net/http"
+ 	"net/http/httptest"
  	"testing"
  	"time"

@@ -61,6 +63,19 @@ func TestReshape(t *testing.T) {

  }

+ func TestParamsCopy(t *testing.T) {
+ 	in := Params{
+ 		"ok":  1,
+ 		"x":   "seventeen",
+ 		"nil": nil,
+ 	}
+ 	out := in.Copy()
+ 	assert.Equal(t, in, out)
+ 	if &in == &out {
+ 		t.Error("didn't copy")
+ 	}
+ }

  func TestParamsGet(t *testing.T) {
  	in := Params{
  		"ok": 1,

@@ -346,3 +361,53 @@ func TestParamsGetStructMissingOK(t *testing.T) {
  	assert.Equal(t, 4.2, out.Float)
  	assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
  }

+ func TestParamsGetHTTPRequest(t *testing.T) {
+ 	in := Params{}
+ 	req, err := in.GetHTTPRequest()
+ 	assert.Nil(t, req)
+ 	assert.Error(t, err)
+ 	assert.Equal(t, true, IsErrParamNotFound(err), err.Error())
+
+ 	in = Params{
+ 		"_request": 42,
+ 	}
+ 	req, err = in.GetHTTPRequest()
+ 	assert.Nil(t, req)
+ 	assert.Error(t, err)
+ 	assert.Equal(t, true, IsErrParamInvalid(err), err.Error())
+
+ 	r := new(http.Request)
+ 	in = Params{
+ 		"_request": r,
+ 	}
+ 	req, err = in.GetHTTPRequest()
+ 	assert.NotNil(t, req)
+ 	assert.NoError(t, err)
+ 	assert.Equal(t, r, req)
+ }
+
+ func TestParamsGetHTTPResponseWriter(t *testing.T) {
+ 	in := Params{}
+ 	wr, err := in.GetHTTPResponseWriter()
+ 	assert.Nil(t, wr)
+ 	assert.Error(t, err)
+ 	assert.Equal(t, true, IsErrParamNotFound(err), err.Error())
+
+ 	in = Params{
+ 		"_response": 42,
+ 	}
+ 	wr, err = in.GetHTTPResponseWriter()
+ 	assert.Nil(t, wr)
+ 	assert.Error(t, err)
+ 	assert.Equal(t, true, IsErrParamInvalid(err), err.Error())
+
+ 	var w http.ResponseWriter = httptest.NewRecorder()
+ 	in = Params{
+ 		"_response": w,
+ 	}
+ 	wr, err = in.GetHTTPResponseWriter()
+ 	assert.NotNil(t, wr)
+ 	assert.NoError(t, err)
+ 	assert.Equal(t, w, wr)
+ }

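TestParamsCopy checks equality and identity, but it is worth being explicit about what a shallow copy does and does not protect: the top-level map is fresh, while nested mutable values stay shared. A short sketch:

```go
package main

import "fmt"

func main() {
	in := map[string]interface{}{
		"opt": map[string]string{"k": "v"},
	}
	// Shallow copy: new top-level map, same value references.
	out := make(map[string]interface{}, len(in))
	for k, v := range in {
		out[k] = v
	}

	out["new"] = 1 // top-level insert: does not affect in
	out["opt"].(map[string]string)["k"] = "changed"

	fmt.Println(len(in), len(out))                  // 1 2
	fmt.Println(in["opt"].(map[string]string)["k"]) // "changed" - shared value
}
```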
@@ -183,7 +183,7 @@ func writeError(path string, in rc.Params, w http.ResponseWriter, err error, sta
  	})
  	if err != nil {
  		// can't return the error at this point
- 		fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
+ 		fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)
  	}
  }

@@ -267,6 +267,9 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
  		writeError(path, in, w, errors.Errorf("authentication must be set up on the rc server to use %q or the --rc-no-auth flag must be in use", path), http.StatusForbidden)
  		return
  	}

+ 	inOrig := in.Copy()

  	if call.NeedsRequest {
  		// Add the request to RC
  		in["_request"] = r

@@ -279,7 +282,7 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
  	// Check to see if it is async or not
  	isAsync, err := in.GetBool("_async")
  	if rc.NotErrParamNotFound(err) {
- 		writeError(path, in, w, err, http.StatusBadRequest)
+ 		writeError(path, inOrig, w, err, http.StatusBadRequest)
  		return
  	}
  	delete(in, "_async") // remove the async parameter after parsing so vfs operations don't get confused

@@ -294,7 +297,7 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
  		w.Header().Add("x-rclone-jobid", fmt.Sprintf("%d", jobID))
  	}
  	if err != nil {
- 		writeError(path, in, w, err, http.StatusInternalServerError)
+ 		writeError(path, inOrig, w, err, http.StatusInternalServerError)
  		return
  	}
  	if out == nil {

@@ -305,8 +308,8 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
  	err = rc.WriteJSON(w, out)
  	if err != nil {
  		// can't return the error at this point - but have a go anyway
- 		writeError(path, in, w, err, http.StatusInternalServerError)
- 		fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
+ 		writeError(path, inOrig, w, err, http.StatusInternalServerError)
+ 		fs.Errorf(nil, "rc: handlePost: failed to write JSON output: %v", err)
  	}
  }

@@ -387,18 +390,20 @@ func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string)
  		s.serveRoot(w, r)
  		return
  	case s.files != nil:
- 		pluginsMatchResult := webgui.PluginsMatch.FindStringSubmatch(path)
-
- 		if s.opt.WebUI && pluginsMatchResult != nil && len(pluginsMatchResult) > 2 {
+ 		if s.opt.WebUI {
+ 			pluginsMatchResult := webgui.PluginsMatch.FindStringSubmatch(path)
+
+ 			if pluginsMatchResult != nil && len(pluginsMatchResult) > 2 {
  				ok := webgui.ServePluginOK(w, r, pluginsMatchResult)
  				if !ok {
  					r.URL.Path = fmt.Sprintf("/%s/%s/app/build/%s", pluginsMatchResult[1], pluginsMatchResult[2], pluginsMatchResult[3])
  					s.pluginsHandler.ServeHTTP(w, r)
+ 					return
+ 				}
+ 				return
+ 			} else if webgui.ServePluginWithReferrerOK(w, r, path) {
  				return
  			}
- 			return
- 		} else if s.opt.WebUI && webgui.ServePluginWithReferrerOK(w, r, path) {
- 			return
  		}
  		// Serve the files
  		r.URL.Path = "/" + path

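The `inOrig := in.Copy()` hunk exists because the handler mutates `in` (injecting `_request`/`_response`, deleting `_async`), while error responses should echo what the caller actually sent. A stripped-down sketch of that pattern with a hypothetical params type:

```go
package main

import "fmt"

type params map[string]interface{}

// copyParams takes a shallow copy so later mutation of the working map
// does not change the saved original.
func copyParams(p params) params {
	out := make(params, len(p))
	for k, v := range p {
		out[k] = v
	}
	return out
}

func main() {
	in := params{"command": "version", "_async": true}
	inOrig := copyParams(in)

	delete(in, "_async") // the handler consumes internal keys

	// On error, report what the caller sent, not the mutated map.
	fmt.Printf("error response input: %v\n", inOrig)
}
```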
@@ -7,6 +7,7 @@ import (
  	"io/ioutil"
  	"net/http"
  	"net/http/httptest"
+ 	"os"
  	"regexp"
  	"strings"
  	"testing"

@@ -16,6 +17,7 @@ import (
  	"github.com/stretchr/testify/require"

  	_ "github.com/rclone/rclone/backend/local"
+ 	"github.com/rclone/rclone/fs"
  	"github.com/rclone/rclone/fs/accounting"
  	"github.com/rclone/rclone/fs/rc"
  )

@@ -27,6 +29,21 @@ const (
  	remoteURL = "[" + testFs + "]/" // initial URL path to fetch from that remote
  )

+ func TestMain(m *testing.M) {
+ 	// Pretend to be rclone version if we have a version string parameter
+ 	if os.Args[len(os.Args)-1] == "version" {
+ 		fmt.Printf("rclone %s\n", fs.Version)
+ 		os.Exit(0)
+ 	}
+ 	// Pretend to error if we have an unknown command
+ 	if os.Args[len(os.Args)-1] == "unknown_command" {
+ 		fmt.Printf("rclone %s\n", fs.Version)
+ 		fmt.Fprintf(os.Stderr, "Unknown command\n")
+ 		os.Exit(1)
+ 	}
+ 	os.Exit(m.Run())
+ }

  // Test the RC server runs and we can do HTTP fetches from it.
  // We'll do the majority of the testing with the httptest framework
  func TestRcServer(t *testing.T) {

@@ -455,6 +472,73 @@ func TestRC(t *testing.T) {
  	testServer(t, tests, &opt)
  }

+ func TestRCWithAuth(t *testing.T) {
+ 	tests := []testRun{{
+ 		Name:        "core-command",
+ 		URL:         "core/command",
+ 		Method:      "POST",
+ 		Body:        `command=version`,
+ 		ContentType: "application/x-www-form-urlencoded",
+ 		Status:      http.StatusOK,
+ 		Expected: fmt.Sprintf(`{
+ 	"error": false,
+ 	"result": "rclone %s\n"
+ }
+ `, fs.Version),
+ 	}, {
+ 		Name:        "core-command-bad-returnType",
+ 		URL:         "core/command",
+ 		Method:      "POST",
+ 		Body:        `command=version&returnType=POTATO`,
+ 		ContentType: "application/x-www-form-urlencoded",
+ 		Status:      http.StatusInternalServerError,
+ 		Expected: `{
+ 	"error": "Unknown returnType \"POTATO\"",
+ 	"input": {
+ 		"command": "version",
+ 		"returnType": "POTATO"
+ 	},
+ 	"path": "core/command",
+ 	"status": 500
+ }
+ `,
+ 	}, {
+ 		Name:        "core-command-stream",
+ 		URL:         "core/command",
+ 		Method:      "POST",
+ 		Body:        `command=version&returnType=STREAM`,
+ 		ContentType: "application/x-www-form-urlencoded",
+ 		Status:      http.StatusOK,
+ 		Expected: fmt.Sprintf(`rclone %s
+ {}
+ `, fs.Version),
+ 	}, {
+ 		Name:        "core-command-stream-error",
+ 		URL:         "core/command",
+ 		Method:      "POST",
+ 		Body:        `command=unknown_command&returnType=STREAM`,
+ 		ContentType: "application/x-www-form-urlencoded",
+ 		Status:      http.StatusOK,
+ 		Expected: fmt.Sprintf(`rclone %s
+ Unknown command
+ {
+ 	"error": "exit status 1",
+ 	"input": {
+ 		"command": "unknown_command",
+ 		"returnType": "STREAM"
+ 	},
+ 	"path": "core/command",
+ 	"status": 500
+ }
+ `, fs.Version),
+ 	}}
+ 	opt := newTestOpt()
+ 	opt.Serve = true
+ 	opt.Files = testFs
+ 	opt.NoAuth = true
+ 	testServer(t, tests, &opt)
+ }

  func TestMethods(t *testing.T) {
  	tests := []testRun{{
  		Name: "options",

@@ -15,6 +15,8 @@ import (

  	"github.com/rclone/rclone/fs"
  	"github.com/rclone/rclone/fs/config"
+ 	"github.com/rclone/rclone/fs/rc/rcflags"
+ 	"github.com/rclone/rclone/lib/errors"
  )

  // PackageJSON is the structure of package.json of a plugin

@@ -62,6 +64,8 @@ var (
  	PluginsPath              string
  	pluginsConfigPath        string
  	availablePluginsJSONPath = "availablePlugins.json"
+ 	initSuccess              = false
+ 	initMutex                = &sync.Mutex{}
  )

  func init() {

@@ -69,11 +73,6 @@ func init() {
  	PluginsPath = filepath.Join(cachePath, "plugins")
  	pluginsConfigPath = filepath.Join(PluginsPath, "config")
-
- 	loadedPlugins = newPlugins(availablePluginsJSONPath)
- 	err := loadedPlugins.readFromFile()
- 	if err != nil {
- 		fs.Errorf(nil, "error reading available plugins: %v", err)
- 	}
  }

  // Plugins represents the structure how plugins are saved onto disk

@@ -90,9 +89,25 @@ func newPlugins(fileName string) *Plugins {
  	return &p
  }

+ func initPluginsOrError() error {
+ 	if !rcflags.Opt.WebUI {
+ 		return errors.New("WebUI needs to be enabled for plugins to work")
+ 	}
+ 	initMutex.Lock()
+ 	defer initMutex.Unlock()
+ 	if !initSuccess {
+ 		loadedPlugins = newPlugins(availablePluginsJSONPath)
+ 		err := loadedPlugins.readFromFile()
+ 		if err != nil {
+ 			fs.Errorf(nil, "error reading available plugins: %v", err)
+ 		}
+ 		initSuccess = true
+ 	}
+
+ 	return nil
+ }

  func (p *Plugins) readFromFile() (err error) {
- 	//p.mutex.Lock()
- 	//defer p.mutex.Unlock()
  	err = CreatePathIfNotExist(pluginsConfigPath)
  	if err != nil {
  		return err

@@ -169,8 +184,6 @@ func (p *Plugins) addTestPlugin(pluginName string, testURL string, handlesType [
  }

  func (p *Plugins) writeToFile() (err error) {
- 	//p.mutex.Lock()
- 	//defer p.mutex.Unlock()
  	availablePluginsJSON := filepath.Join(pluginsConfigPath, p.fileName)

  	file, err := json.MarshalIndent(p, "", "  ")

@@ -290,6 +303,10 @@ var referrerPathReg = regexp.MustCompile("^(https?):\\/\\/(.+):([0-9]+)?\\/(.*)\
  // sends a redirect to actual url. This function is useful for plugins to refer to absolute paths when
  // the referrer in http.Request is set
  func ServePluginWithReferrerOK(w http.ResponseWriter, r *http.Request, path string) (ok bool) {
+ 	err := initPluginsOrError()
+ 	if err != nil {
+ 		return false
+ 	}
  	referrer := r.Referer()
  	referrerPathMatch := referrerPathReg.FindStringSubmatch(referrer)

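`initPluginsOrError` guards initialisation with a mutex plus a boolean rather than `sync.Once`; that shape can return an error to the caller and leaves room to gate on runtime flags. A self-contained sketch of the same guard, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	initMu   sync.Mutex
	initDone bool
	state    []string
)

// initOnce refuses to run while the feature is disabled, and performs
// the expensive setup exactly once after that.
func initOnce(enabled bool) error {
	if !enabled {
		return fmt.Errorf("feature disabled")
	}
	initMu.Lock()
	defer initMu.Unlock()
	if !initDone {
		state = []string{"loaded"} // expensive setup goes here
		initDone = true
	}
	return nil
}

func main() {
	if err := initOnce(false); err != nil {
		fmt.Println("first call:", err) // feature disabled
	}
	if err := initOnce(true); err == nil {
		fmt.Println("second call:", state) // [loaded]
	}
}
```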
@@ -30,6 +30,10 @@ Eg
  }

  func rcListTestPlugins(_ context.Context, _ rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	return rc.Params{
  		"loadedTestPlugins": filterPlugins(loadedPlugins, func(json *PackageJSON) bool { return json.isTesting() }),
  	}, nil

@@ -45,7 +49,7 @@ func init() {

  This takes the following parameters

- - name: name of the plugin in the format <author>/<plugin_name>
+ - name: name of the plugin in the format ` + "`author`/`plugin_name`" + `

  Eg

@@ -54,6 +58,10 @@ Eg
  	})
  }
  func rcRemoveTestPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	name, err := in.GetString("name")
  	if err != nil {
  		return nil, err

@@ -85,6 +93,10 @@ Eg
  }

  func rcAddPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	pluginURL, err := in.GetString("url")
  	if err != nil {
  		return nil, err

@@ -192,6 +204,10 @@ Eg
  }

  func rcGetPlugins(_ context.Context, _ rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	err = loadedPlugins.readFromFile()
  	if err != nil {
  		return nil, err

@@ -212,7 +228,7 @@ func init() {

  This takes parameters

- - name: name of the plugin in the format <author>/<plugin_name>
+ - name: name of the plugin in the format ` + "`author`/`plugin_name`" + `

  Eg

@@ -222,6 +238,10 @@ Eg
  	})
  }

  func rcRemovePlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	name, err := in.GetString("name")
  	if err != nil {
  		return nil, err

@@ -260,6 +280,10 @@ Eg
  }

  func rcGetPluginsForType(_ context.Context, in rc.Params) (out rc.Params, err error) {
+ 	err = initPluginsOrError()
+ 	if err != nil {
+ 		return nil, err
+ 	}
  	handlesType, err := in.GetString("type")
  	if err != nil {
  		handlesType = ""

@@ -9,6 +9,7 @@ import (
  	"testing"

  	"github.com/rclone/rclone/fs/rc"
+ 	"github.com/rclone/rclone/fs/rc/rcflags"
  	"github.com/stretchr/testify/assert"
  	"github.com/stretchr/testify/require"
  )

@@ -18,6 +19,10 @@ const testPluginAuthor = "rclone"
  const testPluginKey = testPluginAuthor + "/" + testPluginName
  const testPluginURL = "https://github.com/" + testPluginAuthor + "/" + testPluginName + "/"

+ func init() {
+ 	rcflags.Opt.WebUI = true
+ }

  func setCacheDir(t *testing.T) string {
  	cacheDir, err := ioutil.TempDir("", "rclone-cache-dir")
  	assert.Nil(t, err)

@@ -1590,7 +1590,7 @@ func TestSyncCopyDest(t *testing.T) {
 }

 // Test with BackupDir set
-func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
+func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
     r := fstest.NewRun(t)
     defer r.Finalise()

@@ -1599,7 +1599,23 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
     }
     r.Mkdir(context.Background(), r.Fremote)

-    fs.Config.BackupDir = r.FremoteName + "/backup"
+    if backupDir != "" {
+        fs.Config.BackupDir = r.FremoteName + "/" + backupDir
+        backupDir += "/"
+    } else {
+        fs.Config.BackupDir = ""
+        backupDir = "dst/"
+        // Exclude the suffix from the sync otherwise the sync
+        // deletes the old backup files
+        flt, err := filter.NewFilter(nil)
+        require.NoError(t, err)
+        require.NoError(t, flt.AddRule("- *"+suffix))
+        oldFlt := filter.Active
+        filter.Active = flt
+        defer func() {
+            filter.Active = oldFlt
+        }()
+    }
     fs.Config.Suffix = suffix
     fs.Config.SuffixKeepExtension = suffixKeepExtension
     defer func() {

@@ -1627,14 +1643,14 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
     require.NoError(t, err)

     // one should be moved to the backup dir and the new one installed
-    file1.Path = "backup/one" + suffix
+    file1.Path = backupDir + "one" + suffix
     file1a.Path = "dst/one"
     // two should be unchanged
     // three should be moved to the backup dir
     if suffixKeepExtension {
-        file3.Path = "backup/three" + suffix + ".txt"
+        file3.Path = backupDir + "three" + suffix + ".txt"
     } else {
-        file3.Path = "backup/three.txt" + suffix
+        file3.Path = backupDir + "three.txt" + suffix
     }

     fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)

@@ -1652,22 +1668,29 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
     require.NoError(t, err)

     // one should be moved to the backup dir and the new one installed
-    file1a.Path = "backup/one" + suffix
+    file1a.Path = backupDir + "one" + suffix
     file1b.Path = "dst/one"
     // two should be unchanged
     // three should be moved to the backup dir
     if suffixKeepExtension {
-        file3a.Path = "backup/three" + suffix + ".txt"
+        file3a.Path = backupDir + "three" + suffix + ".txt"
     } else {
-        file3a.Path = "backup/three.txt" + suffix
+        file3a.Path = backupDir + "three.txt" + suffix
     }

     fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
 }

-func TestSyncBackupDir(t *testing.T)           { testSyncBackupDir(t, "", false) }
-func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
+func TestSyncBackupDir(t *testing.T) {
+    testSyncBackupDir(t, "backup", "", false)
+}
+func TestSyncBackupDirWithSuffix(t *testing.T) {
+    testSyncBackupDir(t, "backup", ".bak", false)
+}
 func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
-    testSyncBackupDir(t, "-2019-01-01", true)
+    testSyncBackupDir(t, "backup", "-2019-01-01", true)
+}
+func TestSyncBackupDirSuffixOnly(t *testing.T) {
+    testSyncBackupDir(t, "", ".bak", false)
 }

 // Test with Suffix set
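To make the path assertions above concrete: with suffixKeepExtension=true and suffix "-2019-01-01", a backed-up three.txt lands as backup/three-2019-01-01.txt; with suffixKeepExtension=false and suffix ".bak" it lands as backup/three.txt.bak; and in the new suffix-only case (empty backupDir) the renamed backups stay in dst/ beside the live files, which is why the test installs a "- *.bak"-style exclude filter so the next sync does not delete them.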
@@ -1,4 +1,4 @@
 package fs

 // Version of rclone
-var Version = "v1.53.0-DEV"
+var Version = "v1.53.4-DEV"
@@ -27,15 +27,16 @@ type Test struct {
 //
 // FIXME make bucket based remotes set sub-dir automatically???
 type Backend struct {
     Backend     string   // name of the backend directory
     Remote      string   // name of the test remote
     FastList    bool     // set to test with -fast-list
     Short       bool     // set to test with -short
     OneOnly     bool     // set to run only one backend test at once
     MaxFile     string   // file size limit
     CleanUp     bool     // when running clean, run cleanup first
     Ignore      []string // test names to ignore the failure of
     Tests       []string // paths of tests to run, blank for all
+    ListRetries int      // -list-retries if > 0
 }

 // includeTest returns true if this backend should be included in this

@@ -79,16 +80,17 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
             continue
         }
         run := &Run{
             Remote:      b.Remote,
             Backend:     b.Backend,
             Path:        t.Path,
             FastList:    fastlist,
             Short:       (b.Short && t.Short),
             NoRetries:   t.NoRetries,
             OneOnly:     b.OneOnly,
             NoBinary:    t.NoBinary,
             SizeLimit:   int64(maxSize),
             Ignore:      ignore,
+            ListRetries: b.ListRetries,
         }
         if t.AddBackend {
             run.Path = path.Join(run.Path, b.Backend)
@@ -20,6 +20,7 @@ backends:
 - backend: "b2"
   remote: "TestB2:"
   fastlist: true
+  listretries: 5
 - backend: "crypt"
   remote: "TestCryptDrive:"
   fastlist: true

@@ -41,15 +42,13 @@ backends:
   remote: "TestChunkerChunk3bNometaLocal:"
   fastlist: true
   maxfile: 6k
-# Disable chunker with mailru tests until Mailru is fixed - see
-# https://github.com/rclone/rclone/issues/4376
-# - backend: "chunker"
-#   remote: "TestChunkerMailru:"
-#   fastlist: true
-# - backend: "chunker"
-#   remote: "TestChunkerChunk50bMailru:"
-#   fastlist: true
-#   maxfile: 10k
+- backend: "chunker"
+  remote: "TestChunkerMailru:"
+  fastlist: true
+- backend: "chunker"
+  remote: "TestChunkerChunk50bMailru:"
+  fastlist: true
+  maxfile: 10k
 - backend: "chunker"
   remote: "TestChunkerChunk50bYandex:"
   fastlist: true

@@ -73,6 +72,10 @@ backends:
   remote: "TestChunkerChunk50bSHA1HashS3:"
   fastlist: true
   maxfile: 1k
+- backend: "chunker"
+  remote: "TestChunkerOverCrypt:"
+  fastlist: true
+  maxfile: 6k
 - backend: "chunker"
   remote: "TestChunkerChunk50bMD5QuickS3:"
   fastlist: true

@@ -144,12 +147,12 @@ backends:
 #   ignore:
 #     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
 #     - TestIntegration/FsMkdir/FsPutFiles/SetTier
-- backend: "s3"
-  remote: "TestS3Ceph:"
-  fastlist: true
-  ignore:
-    - TestIntegration/FsMkdir/FsPutFiles/FsCopy
-    - TestIntegration/FsMkdir/FsPutFiles/SetTier
+# - backend: "s3"
+#   remote: "TestS3Ceph:"
+#   fastlist: true
+#   ignore:
+#     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
+#     - TestIntegration/FsMkdir/FsPutFiles/SetTier
 - backend: "s3"
   remote: "TestS3Alibaba:"
   fastlist: true

@@ -170,11 +173,11 @@ backends:
 - backend: "swift"
   remote: "TestSwift:"
   fastlist: true
-- backend: "swift"
-  remote: "TestSwiftCeph:"
-  fastlist: true
-  ignore:
-    - TestIntegration/FsMkdir/FsPutFiles/FsCopy
+# - backend: "swift"
+#   remote: "TestSwiftCeph:"
+#   fastlist: true
+#   ignore:
+#     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
 - backend: "yandex"
   remote: "TestYandex:"
   fastlist: false
@@ -35,16 +35,17 @@ var (
 // if retries are needed.
 type Run struct {
     // Config
     Remote      string // name of the test remote
     Backend     string // name of the backend
     Path        string // path to the source directory
     FastList    bool   // add -fast-list to tests
     Short       bool   // add -short
     NoRetries   bool   // don't retry if set
     OneOnly     bool   // only run test for this backend at once
     NoBinary    bool   // set to not build a binary
     SizeLimit   int64  // maximum test file size
     Ignore      map[string]struct{}
+    ListRetries int // -list-retries if > 0
     // Internals
     CmdLine   []string
     CmdString string

@@ -336,8 +337,12 @@ func (r *Run) Init() {
         r.CmdLine = []string{"./" + r.BinaryName()}
     }
     r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
-    if *listRetries > 0 {
-        r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(*listRetries))
+    listRetries := *listRetries
+    if r.ListRetries > 0 {
+        listRetries = r.ListRetries
+    }
+    if listRetries > 0 {
+        r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
     }
     r.Try = 1
     if *verbose {
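The Init change above gives a per-backend ListRetries from config.yaml precedence over the global -list-retries flag, falling back to the flag when the backend does not set one. Distilled into a standalone helper (illustrative only, not code from the repository):

package testall // illustrative package name

// effectiveListRetries returns the per-backend override when set,
// otherwise the command-line flag value; a result of zero or less
// means -list-retries is not passed to the test binary at all.
func effectiveListRetries(flagValue, backendValue int) int {
    if backendValue > 0 {
        return backendValue
    }
    return flagValue
}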
@@ -2,8 +2,10 @@
 package random

 import (
+    cryptorand "crypto/rand"
     "encoding/base64"
-    "math/rand"
+    "encoding/binary"
+    mathrand "math/rand"

     "github.com/pkg/errors"
 )

@@ -23,7 +25,7 @@ func String(n int) string {
     for i := range out {
         source := pattern[p]
         p = (p + 1) % len(pattern)
-        out[i] = source[rand.Intn(len(source))]
+        out[i] = source[mathrand.Intn(len(source))]
     }
     return string(out)
 }

@@ -41,7 +43,7 @@ func Password(bits int) (password string, err error) {
         bytes++
     }
     var pw = make([]byte, bytes)
-    n, err := rand.Read(pw)
+    n, err := cryptorand.Read(pw)
     if err != nil {
         return "", errors.Wrap(err, "password read failed")
     }

@@ -51,3 +53,19 @@ func Password(bits int) (password string, err error) {
     password = base64.RawURLEncoding.EncodeToString(pw)
     return password, nil
 }
+
+// Seed the global math/rand with crypto strong data
+//
+// This doesn't make it OK to use math/rand in crypto sensitive
+// environments - don't do that! However it does help to mitigate the
+// problem if that happens accidentally. This would have helped with
+// CVE-2020-28924 - #4783
+func Seed() error {
+    var seed int64
+    err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed)
+    if err != nil {
+        return errors.Wrap(err, "failed to read random seed")
+    }
+    mathrand.Seed(seed)
+    return nil
+}
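A plausible call site for the new helper (a hedged sketch — where rclone actually wires up Seed is not shown in this diff):

package main

import (
    "log"

    "github.com/rclone/rclone/lib/random"
)

func main() {
    // Seed math/rand's global source from crypto/rand once at startup
    // so that accidental uses of math/rand are at least unpredictable.
    if err := random.Seed(); err != nil {
        log.Fatalf("failed to seed random source: %v", err)
    }
    // ... rest of the program ...
}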
@@ -1,6 +1,7 @@
 package random

 import (
+    "math/rand"
     "testing"

     "github.com/stretchr/testify/assert"

@@ -48,3 +49,16 @@ func TestPasswordDuplicates(t *testing.T) {
         seen[s] = true
     }
 }
+
+func TestSeed(t *testing.T) {
+    // seed 100 times and check the first random number doesn't repeat
+    // This test could fail with a probability of ~ 10**-15
+    const n = 100
+    var seen = map[int64]bool{}
+    for i := 0; i < n; i++ {
+        assert.NoError(t, Seed())
+        first := rand.Int63()
+        assert.False(t, seen[first])
+        seen[first] = true
+    }
+}
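The failure probability quoted in the test comment checks out as a birthday bound, assuming rand.Int63 draws uniformly from 2^63 values: the chance that 100 draws contain any repeat is at most n(n-1)/2 / 2^63 = 4950 / 2^63 ≈ 5.4 × 10^-16, i.e. on the order of 10^-15 as the comment says.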
@@ -342,9 +342,14 @@ func (f *File) Size() int64 {
 }

 // SetModTime sets the modtime for the file
+//
+// if NoModTime is set then it does nothing
 func (f *File) SetModTime(modTime time.Time) error {
     f.mu.Lock()
     defer f.mu.Unlock()
+    if f.d.vfs.Opt.NoModTime {
+        return nil
+    }
     if f.d.vfs.Opt.ReadOnly {
         return EROFS
     }
@@ -166,6 +166,11 @@ whereas the --vfs-read-ahead is buffered on disk.
 When using this mode it is recommended that --buffer-size is not set
 too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
 ### VFS Performance

 These flags may be used to enable/disable features of the VFS for
Some files were not shown because too many files have changed in this diff.