forked from TrueCloudLab/rclone
Compare commits: pr-7789-on...tcl/master (423 commits)
Commits (SHA1, oldest group of metadata columns not captured in this export view):

4733d46d83 7525fbd9a9 5557f6af23 c719874fd9 12b572e62a be95531bfd
b93e134b5b 0255757831 5385ce33a5 f2d16ab4c5 c0fc4fe0ca 669b2f2669
e1ba10a86e 022442cf58 5cc4488294 ec9566c5c3 f6976eb4c4 c242c00799
bf954b74ff 88f0770d0a 41d905c9b0 300a063b5e 61bf29ed5e 3191717572
961dfe97b5 22612b4b38 b9927461c3 6d04be99f2 06ae0dfa54 912f29b5b8
8d78768aaa 6aa924f28d 48f2c2db70 a88066aff3 75f5b06ff7 daeeb7c145
d6a5fc6ffa c0bfedf99c 76b76c30bf 737fcc804f 70f3965354 d5c100edaf
dc7458cea0 49f69196c2 3f7651291b 796013dd06 e0da406ca7 9a02c04028
918185273f 3f2074901a 648afc7df4 16e0245a8e 59acb9dfa9 bfec159504
842396c8a0 5f9a201b45 22583d0a5f f1466a429c e3b09211b8 156feff9f2
b29a22095f 861c01caf5 f1a84d171e bcdfad3c83 88b0757288 33d6c3f92f
752809309d 4a54cc134f dfc2c98bbf 604d6bcb9c d15704ef9f 26bc9826e5
2a28b0eaf0 2d1c2b1f76 ffb2e2a6de c9c283533c 71799d7efd 8f4fdf6cc8
91b11f9eac b49927fbd0 1a8b7662e7 6ba3e24853 802a938bd1 9deb3e8adf
296281a6eb 711478554e 906aef91fa 6b58cd0870 af9f8ced80 c63f1865f3
1bb89bc818 a365503750 3bb6d0a42b f65755b3a3 33c5f35935 4367b999c9
b57e6213aa cd90ba4337 0e5eb7a9bb 956c2963fd 146562975b 4c1cb0622e
258092f9c6 be448c9e13 4e708e59f2 c8366dfef3 1e14523b82 da25305ba0
e439121ab2 37c12732f9 4c488e7517 7261f47bd2 1db8b20fbc a87d8967fc
4804f1f1e9 d1c84f9115 e0b08883cb a0af72c27a 28d6985764 f2ce9a9557
95151eac82 bd9bf4eb1c 705c72d293 330c6702eb 4d787ae87f 86e9a56d73
64e8013c1b 33bff6fe71 e82b5b11af 4454ed9d3b bad8207378 c6d3714e73
59501fcdb6 afd199d756 00e073df1e 2e007f89c7 edd9347694 1fad49ee35
182b2a6417 da9faf1ffe 303358eeda 62233b4993 498abcc062 482bfae8fa
ae9960a4ed 089c168fb9 6f515ded8f 91c6faff71 874616a73e 458d93ea7e
513653910c bd5199910b f6d836eefd 87ec26001f 3e12612aae aee2480fc4
3ffa47ea16 70e8ad456f 55b9b3e33a ce7dfa075c a697d27455 cae22a7562
877321c2fb 574378e871 50d42babd8 13ea77dd71 62b76b631c 96f92b7364
7c02a63884 67d4394a37 c1a98768bc bac9abebfb 27b281ef69 10270a4354
d08b49d723 cb2d2d72a0 e686e34f89 5f66350331 e1d935b854 61b27cda80
83613634f9 1c80cbd13a 9d5315a944 8d1d096c11 4b922d86d7 3b3625037c
bfa3278f30 e334366345 642d4082ac 024ff6ed15 d6b0743cf4 e4749cf0d0
8d2907d8f5 1720d3e11c c6352231e4 731947f3ca 16d642825d 50aebcf403
c8555d1b16 3ec0ff5d8f 746516511d 8aef1de695 cb611b8330 66ae050a8b
fd9049c83d a1f52bcf50 0470450583 1901bae4eb 9866d1c636 c5c7bcdd45
d5c7b55ba5 feafbfca52 abe01179ae 612c717ea0 f26d2c6ba8 dcecb0ede4
47588a7fd0 ba381f8721 8f0ddcca4e 404ef80025 13fa583368 e111ffba9e
30ba7542ff 31fabb3402 b3edc9d360 04f35fc3ac 8e5dd79e4d b809e71d6f
d149d1ec3e 3b51ad24b2 485aa90d13 8958d06456 ca24447090 d008381e59
14629c66f9 4824837eed 5287a9b5fa f2ce1767f0 7f048ac901 b0d0e0b267
f5eef420a4 9de485f949 d4b29fef92 471531eb6a afd2663057 97d6a00483
5ddedae431 e1b7bf7701 2a615f4681 e041796bfe 1b9217bc78 846c1aeed0
56caab2033 495a5759d3 d9bd6f35f2 532a0818f7 91558ce6aa 8fbb259091
4d2bc190cc c2bf300dd8 c954c397d9 25c6379688 ce1859cd82 cf25ae69ad
dce8317042 eff2497633 28ba4b832d 58da1a165c eec95a164d 44cd2e07ca
a28287e96d fc1d8dafd5 2c57fe9826 7c51b10d15 3280b6b83c 1a77a2f92b
c156716d01 0d9d0eef4c 2e653f8128 e79273f9c9 8e10fe71f7 c6ab37a59f
671a15f65f 8d72698d5a 6e853c82d8 27267547b9 cdcf0e5cb8 6507770014
bd5799c079 c834eb7dcb 754e53dbcc 5511fa441a 4ed4483bbc 0e85ba5080
e5095a7d7b 300851e8bf cbccad9491 9f1a7cfa67 d84a4c9ac1 1c9da8c96a
af9c5fef93 7060777d1d 0197e7f4e5 c1c9e209f3 fd182af866 4ea629446f
93e8a976ef 8470bdf810 1aa3a37a28 ae887ad042 d279fea44a 282e34f2d5
021f25a748 18e9d039ad cbcfb90d9a caba22a585 3fef8016b5 edf6537c61
00f0e9df9d e6ab644350 61c18e3b60 d068e0b1a9 a341065b8d 0c29a1fe31
1a40300b5f 44be27729a b7624287ac 6db9f7180f b601961e54 51aca9cf9d
7c0645dda9 aed77a8fb2 4250dd98f3 c13118246c a56cd52025 3ae4534ce6
9c287c72d6 862d5d6086 003f4531fe a52e887ddd b7681e72bf ce5024bf33
d2af114139 c8d6b02dd6 55cac4c34d 7ce60a47e8 27496fb26d 39f8d039fe
57f5ad188b 76798d5bb1 5921bb0efd ce0d8a70a3 c23a40cb2a 86a1951a56
b778ec0142 dac7f76b14 446d6b28b8 7e04ff9528 4568feb5f9 b9a2d3b6b9
775e567a7b 59fc7ac193 fea61cac9e 3f3e4b055e 2257c03391 8f1c309c81
8e2f596fd0 de742ffc67 181ed55662 a5700a4a53 faa58315c5 7b89735ae7
91192c2c5e 96e39ea486 488ed28635 b059c96322 6d22168a8c e34e2df600
6607102034 c6c327e4e7 6a0a54ab97 629e895da8 cc634213a5 e9e9feb21e
f26fc8f07c 96703bb31e 96d3adc771 f82822baca af33a4f822 a675cc6677
ad605ee356 4ab235c06c 9a2b85d71c 29b58dd4c5 36ad4eb145 61ab519791
678941afc1 b153254b3a 17cd7a9496 0735f44f91 04c69959b8 25cc8c927a
6356b51b33 1890608f55 cd76fd9219 5b8cdaff39 f2f559230c e0b38cc9ac
68dc79eddd 76cea0c704 41d5d8b88a aa2746d0de b2f6aac754 a0dacf4930
c5ff5afc21 bd8523f208 0bfd70c405 47735d8fe1 617534112b 271ec43189
10eb4742dd 2a2ec06ec1 7237b142fa
644 changed files with 152471 additions and 94198 deletions
45 .forgejo/ISSUE_TEMPLATE/bug_report.md (new file)
@ -0,0 +1,45 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory -->
<!--- If no reason/fix/additions for the bug can be suggested, -->
<!--- uncomment the following phrase: -->

<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->

1.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):
1 .forgejo/ISSUE_TEMPLATE/config.yml (new file)
@ -0,0 +1 @@
blank_issues_enabled: false
24 .forgejo/workflows/builds.yaml (new file)
@ -0,0 +1,24 @@
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make
20 .forgejo/workflows/dco.yml (new file)
@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
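The dco-go checker rejects any commit in the range that lacks a Signed-off-by trailer. Satisfying it is standard git usage (commit message, name and email below are illustrative, not from this change):

```
git commit -s -m "frostfs: tune request timeouts"
# -s appends a trailer of the form:
#   Signed-off-by: Jane Developer <jane@example.com>
```
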
67 .forgejo/workflows/tests.yml (new file)
@ -0,0 +1,67 @@
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true

      - name: Install linters
        run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest

      - name: Run linters
        run: make check

  test:
    name: Test
    runs-on: oci-runner
    strategy:
      matrix:
        go_versions: [ '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Tests for the FrostFS backend
        env:
          RESTIC_TEST_FUSE: false
          AIO_IMAGE: truecloudlab/frostfs-aio
          AIO_VERSION: 1.7.0-nightly.4
          RCLONE_CONFIG: /config/rclone.conf

        # run only tests related to FrostFS backend
        run: |-
          podman-service.sh
          podman info

          mkdir /config
          printf "[TestFrostFS]\ntype = frostfs\nendpoint = localhost:8080\nwallet = /config/wallet.json\nplacement_policy = REP 1\nrequest_timeout = 20s\nconnection_timeout = 21s" > /config/rclone.conf

          echo "Run frostfs aio container"
          docker run -d --net=host --name aio $AIO_IMAGE:$AIO_VERSION --restart always -p 8080:8080

          echo "Wait for frostfs to start"
          until docker exec aio curl --fail http://localhost:8083 > /dev/null 2>&1; do sleep 0.2; done;

          echo "Issue creds"
          docker exec aio /usr/bin/issue-creds.sh native
          echo "Copy wallet"
          docker cp aio:/config/user-wallet.json /config/wallet.json

          echo "Start tests"
          go test -v github.com/rclone/rclone/backend/frostfs
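Written out, the rclone.conf that the single-line printf above generates is (values verbatim from the workflow):

```
[TestFrostFS]
type = frostfs
endpoint = localhost:8080
wallet = /config/wallet.json
placement_policy = REP 1
request_timeout = 20s
connection_timeout = 21s
```
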
64 .github/workflows/build.yml (vendored)
@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@ -43,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@ -59,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@ -76,23 +76,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.20
-            os: ubuntu-latest
-            go: '1.20'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.21
             os: ubuntu-latest
             go: '1.21'
             quicktest: true
             racequicktest: true

+          - job_name: go1.22
+            os: ubuntu-latest
+            go: '1.22'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@ -124,7 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@ -137,7 +137,7 @@ jobs:
           brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
-          brew install git-annex
+          brew install git-annex git-annex-remote-rclone
         if: matrix.os == 'macos-latest'

       - name: Install Libraries on Windows
@ -223,24 +223,42 @@ jobs:
     runs-on: ubuntu-latest

     steps:
+      - name: Get runner parameters
+        id: get-runner-parameters
+        shell: bash
+        run: |
+          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
+          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
+
       - name: Checkout
         uses: actions/checkout@v4

       - name: Install Go
+        id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'
           check-latest: true
+          cache: false
+
+      - name: Cache
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/go/pkg/mod
+            ~/.cache/go-build
+            ~/.cache/golangci-lint
+          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
+          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

       - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         with:
           version: latest
-          skip-cache: false # Caching enabled (which is default) on this first lint step only, it handles complete cache of build, go modules and golangci-lint analysis which was necessary to get all lint steps to properly take advantage of it
+          skip-cache: true

       - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "windows"
         with:
@ -248,7 +266,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "darwin"
         with:
@ -256,7 +274,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "freebsd"
         with:
@ -264,7 +282,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v4
+        uses: golangci/golangci-lint-action@v6
         env:
           GOOS: "openbsd"
         with:
@ -293,7 +311,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'

       - name: Set global environment variables
         shell: bash
(workflow file, name not captured in this export)
@ -56,7 +56,7 @@ jobs:
         run: |
           df -h .
       - name: Build and publish image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           file: Dockerfile
           context: .
(workflow file, name not captured in this export)
@ -32,15 +32,27 @@ jobs:
       - name: Get actual major version
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
-        with:
-          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_HUB_USER }}
+          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
+      - name: Build and publish image
+        uses: docker/build-push-action@v6
+        with:
+          file: Dockerfile
+          context: .
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          push: true
+          tags: |
+            rclone/rclone:latest
+            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
+            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
+            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'
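Note that the retained version-extraction steps still use the deprecated `::set-output` syntax. A sketch of the modern equivalent for one of them, writing to `$GITHUB_OUTPUT` instead (a hypothetical rewrite, not part of this change; same step id assumed):

```
- name: Get actual major version
  id: actual_major_version
  run: echo "ACTUAL_MAJOR_VERSION=$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d '.' -f 1)" >> "$GITHUB_OUTPUT"
```
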
15 .github/workflows/notify.yml (new file, vendored)
@ -0,0 +1,15 @@
name: Notify users based on issue labels

on:
  issues:
    types: [labeled]

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - uses: jenschelkopf/issue-label-notification-action@1.3
        with:
          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
          recipients: |
            Support Contract=@rclone/support
5 .gitignore (vendored)
@ -3,7 +3,9 @@ _junk/
 rclone
 rclone.exe
 build
-docs/public
+/docs/public/
+/docs/.hugo_build.lock
+/docs/static/img/logos/
 rclone.iml
 .idea
 .history
@ -16,6 +18,5 @@ fuzz-build.zip
 Thumbs.db
 __pycache__
 .DS_Store
-/docs/static/img/logos/
 resource_windows_*.syso
 .devcontainer
(.golangci lint config, name not captured in this export)
@ -13,6 +13,7 @@ linters:
     - stylecheck
     - unused
     - misspell
+    - gocritic
     #- prealloc
     #- maligned
   disable-all: true
@ -98,3 +99,46 @@ linters-settings:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
+  gocritic:
+    # Enable all default checks with some exceptions and some additions (commented).
+    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
+    disable-all: true
+    enabled-checks:
+      #- appendAssign # Enabled by default
+      - argOrder
+      - assignOp
+      - badCall
+      - badCond
+      #- captLocal # Enabled by default
+      - caseOrder
+      - codegenComment
+      #- commentFormatting # Enabled by default
+      - defaultCaseOrder
+      - deprecatedComment
+      - dupArg
+      - dupBranchBody
+      - dupCase
+      - dupSubExpr
+      - elseif
+      #- exitAfterDefer # Enabled by default
+      - flagDeref
+      - flagName
+      #- ifElseChain # Enabled by default
+      - mapKey
+      - newDeref
+      - offBy1
+      - regexpMust
+      - ruleguard # Not enabled by default
+      #- singleCaseSwitch # Enabled by default
+      - sloppyLen
+      - sloppyTypeAssert
+      - switchTrue
+      - typeSwitchVar
+      - underef
+      - unlambda
+      - unslice
+      - valSwap
+      - wrapperFunc
+    settings:
+      ruleguard:
+        rules: "${configDir}/bin/rules.go"
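The newly enabled `ruleguard` check loads custom rules from `bin/rules.go`. For orientation, a minimal sketch of what a ruleguard rules file looks like (illustrative only - not the repository's actual rules):

```go
//go:build ruleguard

package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// stringsContains suggests strings.Contains over an index comparison.
func stringsContains(m dsl.Matcher) {
	m.Match(`strings.Index($s, $sub) >= 0`).
		Suggest(`strings.Contains($s, $sub)`)
}
```
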
(CONTRIBUTING docs, name not captured in this export)
@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
 project root:

     go install github.com/rclone/rclone/fstest/test_all
-    test_all -backend drive
+    test_all -backends drive

 ### Full integration testing

@ -508,7 +508,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from genric config questions (eg `region` and `endpoint).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint).
 - `docs/content/s3.md`
   - Add the provider at the top of the page.

(MAINTAINERS docs, name not captured in this export)
@ -21,6 +21,8 @@ Current active maintainers of rclone are:
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
 | Hideo Aoyama | @boukendesho | snap packaging |
 | nielash | @nielash | bisync |
+| Dan McArdle | @dmcardle | gitannex |
+| Sam Harrison | @childish-sambino | filescom |

 **This is a work in progress Draft**
60969 MANUAL.html (generated; file diff suppressed because it is too large)
35442 MANUAL.txt (generated; file diff suppressed because it is too large)
13 Makefile
@ -43,6 +43,7 @@ ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
+LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"

 .PHONY: rclone test_all vars version

@ -50,7 +51,7 @@ rclone:
 ifeq ($(GO_OS),windows)
 	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
 endif
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
+	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
 ifeq ($(GO_OS),windows)
 	rm resource_windows_`go env GOARCH`.syso
 endif
@ -59,7 +60,7 @@ endif
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
+	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
 	@echo SHELL="'$(SHELL)'"

@ -87,13 +88,13 @@ test: rclone test_all

 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...

 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...

 compiletest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
+	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...

 # Do source code quality checks
 check: rclone

@ -238,7 +239,7 @@ fetch_binaries:
 	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

 serve: website
-	cd docs && hugo server -v -w --disableFastRender
+	cd docs && hugo server --logLevel info -w --disableFastRender

 tag: retag doc
 	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
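With the flags consolidated into `$(LDFLAGS)`, test builds now receive the same stripped, version-stamped link flags as the main binary. Typical invocations, assuming the targets above behave as defined:

```
make GOTAGS=cmount rclone   # build the binary with the cmount tag
make quicktest              # go test ./... now runs with the same ldflags
```
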
(README storage provider list, name not captured in this export)
@ -55,11 +55,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
+* Files.com [:page_facing_up:](https://rclone.org/filescom/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
+* GoFile [:page_facing_up:](https://rclone.org/gofile/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
+* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
@ -73,6 +76,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Linkbox [:page_facing_up:](https://rclone.org/linkbox)
 * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
+* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
@ -92,6 +96,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
 * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
+* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@ -100,6 +105,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
+* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
51 RELEASE.md
@ -37,18 +37,44 @@ This file describes how to make the various kinds of releases

 ## Update dependencies

-Early in the next release cycle update the dependencies
+Early in the next release cycle update the dependencies.

 * Review any pinned packages in go.mod and remove if possible
-* make updatedirect
-* make GOTAGS=cmount
-* make compiletest
-* git commit -a -v
-* make update
-* make GOTAGS=cmount
-* make compiletest
+* `make updatedirect`
+* `make GOTAGS=cmount`
+* `make compiletest`
+* Fix anything which doesn't compile at this point and commit changes here
+* `git commit -a -v -m "build: update all dependencies"`
+
+If the `make updatedirect` upgrades the version of go in the `go.mod`
+then go to manual mode. `go1.20` here is the lowest supported version
+in the `go.mod`.
+
+```
+go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
+go get -d $(cat /tmp/potential-upgrades)
+go mod tidy -go=1.20 -compat=1.20
+```
+
+If the `go mod tidy` fails use the output from it to remove the
+package which can't be upgraded from `/tmp/potential-upgrades` when
+done
+
+```
+git co go.mod go.sum
+```
+
+And try again.
+
+Optionally upgrade the direct and indirect dependencies. This is very
+likely to fail if the manual method was used above - in that case
+ignore it as it is too time consuming to fix.
+
+* `make update`
+* `make GOTAGS=cmount`
+* `make compiletest`
 * roll back any updates which didn't compile
-* git commit -a -v --amend
+* `git commit -a -v --amend`
+* **NB** watch out for this changing the default go version in `go.mod`

 Note that `make update` updates all direct and indirect dependencies

@ -57,6 +83,9 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.

+Once it compiles locally, push it on a test branch and commit fixes
+until the tests pass.
+
 ## Tidy beta

 At some point after the release run

@ -139,6 +168,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a

 To make a full build then set the tags correctly and add `--push`

+Note that you can't only build one architecture - you need to build them all.
+
 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
2 VERSION
@ -1 +1 @@
-v1.67.0
+v1.68.2
(alias backend internal test, name not captured in this export)
@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
 	configfile.Install()

 	// Configure the remote
-	config.FileSet(remoteName, "type", "alias")
-	config.FileSet(remoteName, "remote", root)
+	config.FileSetValue(remoteName, "type", "alias")
+	config.FileSetValue(remoteName, "remote", root)
 }

 func TestNewFS(t *testing.T) {
(backend registration list, name not captured in this export)
@ -17,7 +17,10 @@ import (
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
+	_ "github.com/rclone/rclone/backend/filescom"
+	_ "github.com/rclone/rclone/backend/frostfs"
 	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"
@ -39,6 +42,7 @@ import (
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/pikpak"
+	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
(azureblob backend, name not captured in this export)
@ -711,10 +711,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ClientOptions: policyClientOptions,
 	}

-	// Here we auth by setting one of cred, sharedKeyCred or f.svc
+	// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
 	var (
 		cred          azcore.TokenCredential
 		sharedKeyCred *service.SharedKeyCredential
+		anonymous     = false
 	)
 	switch {
 	case opt.EnvAuth:
@ -874,6 +875,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
 		}
+	case opt.Account != "":
+		// Anonymous access
+		anonymous = true
 	default:
 		return nil, errors.New("no authentication method configured")
 	}
@ -903,6 +907,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			if err != nil {
 				return nil, fmt.Errorf("create client failed: %w", err)
 			}
+		} else if anonymous {
+			// Anonymous public access
+			f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
+			if err != nil {
+				return nil, fmt.Errorf("create public client failed: %w", err)
+			}
 		}
 	}
 	if f.svc == nil {
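With this change, an azureblob remote that supplies only an account name (no key, SAS or other credential) falls through to the new anonymous public-access path. A sketch of such a config (remote and account names are illustrative):

```
[azpublic]
type = azureblob
account = somepublicaccount
```
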
@ -1088,7 +1098,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
 	isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
 	if isDirectory {
 		// Don't insert the root directory
-		if remote == directory {
+		if remote == f.opt.Enc.ToStandardPath(directory) {
 			continue
 		}
 		// process directory markers as directories
@ -2084,7 +2094,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		return 0, nil
 	}
 	md5sum := m.Sum(nil)
-	transactionalMD5 := md5sum[:]

 	// increment the blockID and save the blocks for finalize
 	var binaryBlockID [8]byte // block counter as LSB first 8 bytes
@ -2107,7 +2116,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	}
 	options := blockblob.StageBlockOptions{
 		// Specify the transactional md5 for the body, to be validated by the service.
-		TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
+		TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
 	}
 	_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
 	if err != nil {
(azurefiles backend, name not captured in this export)
@ -1035,12 +1035,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
 			return fmt.Errorf("update: unable to create file: %w", createErr)
 		}
-	} else {
+	} else if size != o.Size() {
 		// Resize the file if needed
-		if size != o.Size() {
-			if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
-				return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
-			}
+		if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
+			return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
 		}
 	}
(b2 api types test, name not captured in this export)
@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }

 func TestTimestampEqual(t *testing.T) {
-	assert.False(t, emptyT.Equal(emptyT))
+	assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
 	assert.False(t, t0.Equal(emptyT))
 	assert.False(t, emptyT.Equal(t0))
 	assert.False(t, t0.Equal(t1))
 	assert.False(t, t1.Equal(t0))
-	assert.True(t, t0.Equal(t0))
-	assert.True(t, t1.Equal(t1))
+	assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
 }
(b2 backend, name not captured in this export)
@ -102,7 +102,7 @@ below will cause b2 to return specific errors:
 * "force_cap_exceeded"

 These will be set in the "X-Bz-Test-Mode" header which is documented
-in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
+in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
 			Default:  "",
 			Hide:     fs.OptionHideConfigurator,
 			Advanced: true,
@ -244,7 +244,7 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
 			Advanced: true,
-			// See: https://www.backblaze.com/b2/docs/files.html
+			// See: https://www.backblaze.com/docs/cloud-storage-files
 			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
 			// FIXME: allow /, but not leading, trailing or double
 			Default: (encoder.Display |
@ -299,13 +299,14 @@ type Fs struct {

 // Object describes a b2 object
 type Object struct {
-	fs       *Fs       // what this object is part of
-	remote   string    // The remote path
-	id       string    // b2 id of the file
-	modTime  time.Time // The modified time of the object if known
-	sha1     string    // SHA-1 hash if known
-	size     int64     // Size of the object
-	mimeType string    // Content-Type of the object
+	fs       *Fs               // what this object is part of
+	remote   string            // The remote path
+	id       string            // b2 id of the file
+	modTime  time.Time         // The modified time of the object if known
+	sha1     string            // SHA-1 hash if known
+	size     int64             // Size of the object
+	mimeType string            // Content-Type of the object
+	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }

 // ------------------------------------------------------------
@ -1566,7 +1567,7 @@ func (o *Object) Size() int64 {
 //
 // Make sure it is lower case.
 //
-// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
+// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
 // Some tools (e.g. Cyberduck) use this
 func cleanSHA1(sha1 string) string {
 	const unverified = "unverified:"
@ -1593,7 +1594,14 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
-	return o.parseTimeString(Info[timeKey])
+	err = o.parseTimeString(Info[timeKey])
+	if err != nil {
+		return err
+	}
+	// For now, just set "mtime" in metadata
+	o.meta = make(map[string]string, 1)
+	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
+	return nil
 }

 // decodeMetaData sets the metadata in the object from an api.File
@ -1695,6 +1703,16 @@ func timeString(modTime time.Time) string {
 	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
 }

+// parseTimeStringHelper converts a decimal string number of milliseconds
+// elapsed since January 1, 1970 UTC into a time.Time
+func parseTimeStringHelper(timeString string) (time.Time, error) {
+	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
+}
+
 // parseTimeString converts a decimal string number of milliseconds
 // elapsed since January 1, 1970 UTC into a time.Time and stores it in
 // the modTime variable.
@ -1702,12 +1720,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 	if timeString == "" {
 		return nil
 	}
-	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	modTime, err := parseTimeStringHelper(timeString)
 	if err != nil {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return nil
 	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
+	o.modTime = modTime
 	return nil
 }
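A small sketch of the round trip these two helpers implement - `timeString` encodes a `time.Time` as decimal milliseconds since the epoch, `parseTimeStringHelper` decodes it back (values checked by hand; assumes it runs inside the b2 package where both helpers are visible):

```go
t0 := time.Date(2009, 5, 6, 4, 5, 6, 499e6, time.UTC)
s := timeString(t0)                 // "1241582706499"
t1, err := parseTimeStringHelper(s) // t1 == t0: UTC, millisecond precision
if err == nil {
	fmt.Println(t1.Equal(t0)) // true
}
```
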
@ -1861,6 +1879,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		ContentType: resp.Header.Get("Content-Type"),
 		Info:        Info,
 	}

+	// Embryonic metadata support - just mtime
+	o.meta = make(map[string]string, 1)
+	modTime, err := parseTimeStringHelper(info.Info[timeKey])
+	if err == nil {
+		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
+	}
+
 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old
@ -1958,7 +1984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	if err == nil {
 		fs.Debugf(o, "File is big enough for chunked streaming")
-		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
 		if err != nil {
 			o.fs.putRW(rw)
 			return err
@ -1990,7 +2016,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.decodeMetaDataFileInfo(up.info)
 	}

-	modTime := src.ModTime(ctx)
+	modTime, err := o.getModTime(ctx, src, options)
+	if err != nil {
+		return err
+	}

 	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
 	if calculatedSha1 == "" {
@ -2095,6 +2124,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return o.decodeMetaDataFileInfo(&response)
 }

+// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
+// When metadata support is added to b2, this method will need a more generic name
+func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
+	modTime := src.ModTime(ctx)
+
+	// Fetch metadata if --metadata is in use
+	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
+	}
+	// merge metadata into request and user metadata
+	for k, v := range meta {
+		k = strings.ToLower(k)
+		// For now, the only metadata we're concerned with is "mtime"
+		switch k {
+		case "mtime":
+			// mtime in meta overrides source ModTime
+			metaModTime, err := time.Parse(time.RFC3339Nano, v)
+			if err != nil {
+				fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
+			} else {
+				modTime = metaModTime
+			}
+		default:
+			// Do nothing for now
+		}
+	}
+	return modTime, nil
+}
+
 // OpenChunkWriter returns the chunk size and a ChunkWriter
 //
 // Pass in the remote and the src object
@ -2126,7 +2185,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		Concurrency: o.fs.opt.UploadConcurrency,
 		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
 	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
 	return info, up, err
 }
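The practical effect of the new `getModTime` path: when `--metadata` (`-M`) is in use, an `mtime` entry in the source object's metadata overrides the source ModTime before it is encoded into the `src_last_modified_millis` file info. For example (paths and remote name are illustrative):

```
rclone copyto -M /tmp/file.txt TestB2:bucket/file.txt
```
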
(b2 internal test, name not captured in this export)
@ -23,7 +23,7 @@ import (
 )

 // Test b2 string encoding
-// https://www.backblaze.com/b2/docs/string_encoding.html
+// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding

 var encodeTest = []struct {
 	fullyEncoded string
@ -184,57 +184,126 @@ func TestParseTimeString(t *testing.T) {

 }

 // This is adapted from the s3 equivalent.
-func (f *Fs) InternalTestMetadata(t *testing.T) {
-	ctx := context.Background()
-	original := random.String(1000)
-	contents := fstest.Gz(t, original)
-	mimeType := "text/html"
-
-	item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-	btime := time.Now()
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
-	defer func() {
-		assert.NoError(t, obj.Remove(ctx))
-	}()
-	o := obj.(*Object)
-	gotMetadata, err := o.getMetaData(ctx)
-	require.NoError(t, err)
-
-	// We currently have a limited amount of metadata to test with B2
-	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
-
-	// Modification time from the x-bz-info-src_last_modified_millis header
-	var mtime api.Timestamp
-	err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
-	if err != nil {
-		fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
-	}
-	assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
-
-	// Upload time
-	gotBtime := time.Time(gotMetadata.UploadTimestamp)
-	dt := gotBtime.Sub(btime)
-	assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
-
-	t.Run("GzipEncoding", func(t *testing.T) {
-		// Test that the gzipped file we uploaded can be
-		// downloaded
-		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
-			gotContents := fstests.ReadObject(ctx, t, o, -1)
-			assert.Equal(t, wantContents, gotContents)
-			assert.Equal(t, wantSize, o.Size())
-			gotHash, err := o.Hash(ctx, hash.SHA1)
-			require.NoError(t, err)
-			assert.Equal(t, wantHash, gotHash)
-		}
-
-		t.Run("NoDecompress", func(t *testing.T) {
-			checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
-		})
-	})
-}
+// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
+func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
+	var headers = make(map[string]string)
+	for _, option := range options {
+		k, v := option.Header()
+		k = strings.ToLower(k)
+		if strings.HasPrefix(k, headerPrefix) {
+			headers[k[len(headerPrefix):]] = v
+		}
+	}
+	return headers
+}
+
+func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
+	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
+	t.Run(what, func(t *testing.T) {
+		ctx := context.Background()
+
+		ss := fs.SizeSuffix(0)
+		err := ss.Set(size)
+		require.NoError(t, err)
+		original := random.String(int(ss))
+
+		contents := fstest.Gz(t, original)
+		mimeType := "text/html"
+
+		if chunkSize != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(chunkSize)
+			require.NoError(t, err)
+			_, err = f.SetUploadChunkSize(ss)
+			require.NoError(t, err)
+		}
+
+		if uploadCutoff != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(uploadCutoff)
+			require.NoError(t, err)
+			_, err = f.SetUploadCutoff(ss)
+			require.NoError(t, err)
+		}
+
+		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+		btime := time.Now()
+		metadata := fs.Metadata{
+			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
+			"mtime": "2009-05-06T04:05:06.499Z",
+		}
+
+		// Need to specify HTTP options with the header prefix since they are passed as-is
+		options := []fs.OpenOption{
+			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
+			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
+		}
+
+		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
+		defer func() {
+			assert.NoError(t, obj.Remove(ctx))
+		}()
+		o := obj.(*Object)
+		gotMetadata, err := o.getMetaData(ctx)
+		require.NoError(t, err)
+
+		// X-Bz-Info-a & X-Bz-Info-b
+		optMetadata := OpenOptionToMetaData(options)
+		for k, v := range optMetadata {
+			got := gotMetadata.Info[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		// mtime
+		for k, v := range metadata {
+			got := o.meta[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
+
+		// Modification time from the x-bz-info-src_last_modified_millis header
+		var mtime api.Timestamp
+		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
+		if err != nil {
+			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
+		}
+		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
+
+		// Upload time
+		gotBtime := time.Time(gotMetadata.UploadTimestamp)
+		dt := gotBtime.Sub(btime)
+		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
+
+		t.Run("GzipEncoding", func(t *testing.T) {
+			// Test that the gzipped file we uploaded can be
+			// downloaded
+			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
+				gotContents := fstests.ReadObject(ctx, t, o, -1)
+				assert.Equal(t, wantContents, gotContents)
+				assert.Equal(t, wantSize, o.Size())
+				gotHash, err := o.Hash(ctx, hash.SHA1)
+				require.NoError(t, err)
+				assert.Equal(t, wantHash, gotHash)
+			}

+			t.Run("NoDecompress", func(t *testing.T) {
+				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
+			})
+		})
+	})
+}
+
+func (f *Fs) InternalTestMetadata(t *testing.T) {
+	// 1 kB regular file
+	f.internalTestMetadata(t, "1kiB", "", "")
+
+	// 10 MiB large file
+	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
+}

 func sha1Sum(t *testing.T, s string) string {
 	hash := sha1.Sum([]byte(s))
 	return fmt.Sprintf("%x", hash)
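A quick sketch of what `OpenOptionToMetaData` yields for the options used in the test above (assuming `headerPrefix` is the lower-case `x-bz-info-` prefix, as the surrounding code implies):

```go
options := []fs.OpenOption{
	&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
	&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
}
// keys are lower-cased and stripped of the prefix:
meta := OpenOptionToMetaData(options) // map[string]string{"a": "1", "b": "2"}
```
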
(b2 large-file upload, name not captured in this export)
@ -1,6 +1,6 @@
 // Upload large files for b2
 //
-// Docs - https://www.backblaze.com/b2/docs/large_files.html
+// Docs - https://www.backblaze.com/docs/cloud-storage-large-files

 package b2

@ -91,7 +91,7 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
 	size := src.Size()
 	parts := 0
 	chunkSize := defaultChunkSize
@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 			parts++
 		}
 	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_start_large_file",
-	}
 	bucket, bucketPath := o.split()
 	bucketID, err := f.getBucketID(ctx, bucket)
 	if err != nil {
@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		BucketID: bucketID,
 		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
+	optionsToSend := make([]fs.OpenOption, 0, len(options))
 	if newInfo == nil {
-		modTime := src.ModTime(ctx)
+		modTime, err := o.getModTime(ctx, src, options)
+		if err != nil {
+			return nil, err
+		}
+
 		request.ContentType = fs.MimeType(ctx, src)
 		request.Info = map[string]string{
 			timeKey: timeString(modTime),
 		}
+		// Custom upload headers - remove header prefix since they are sent in the body
+		for _, option := range options {
+			k, v := option.Header()
+			k = strings.ToLower(k)
+			if strings.HasPrefix(k, headerPrefix) {
+				request.Info[k[len(headerPrefix):]] = v
+			} else {
+				optionsToSend = append(optionsToSend, option)
+			}
+		}
 		// Set the SHA1 if known
 		if !o.fs.opt.DisableCheckSum || doCopy {
 			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		request.ContentType = newInfo.ContentType
 		request.Info = newInfo.Info
 	}
+	opts := rest.Opts{
+		Method:  "POST",
+		Path:    "/b2_start_large_file",
+		Options: optionsToSend,
+	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
22 backend/cache/cache.go (vendored)
@ -409,18 +409,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 			if err != nil {
 				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
-		} else {
-			if opt.PlexPassword != "" && opt.PlexUsername != "" {
-				decPass, err := obscure.Reveal(opt.PlexPassword)
-				if err != nil {
-					decPass = opt.PlexPassword
-				}
-				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-					m.Set("plex_token", token)
-				})
-				if err != nil {
-					return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
-				}
-			}
+		} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
+			decPass, err := obscure.Reveal(opt.PlexPassword)
+			if err != nil {
+				decPass = opt.PlexPassword
+			}
+			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+				m.Set("plex_token", token)
+			})
+			if err != nil {
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+			}
 		}
 	}
87 backend/cache/cache_internal_test.go (vendored)
@@ -10,7 +10,6 @@ import (
	goflag "flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"os"
	"path"

@@ -33,7 +32,7 @@ import (
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/testy"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/require"
)

@@ -93,7 +92,7 @@ func TestMain(m *testing.M) {
	goflag.Parse()
	var rc int

	log.Printf("Running with the following params: \n remote: %v", remoteName)
	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
	runInstance = newRun()
	rc = m.Run()
	os.Exit(rc)

@@ -123,10 +122,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
	vfsflags.Opt.DirCacheTime = time.Second * 30
	vfscommon.Opt.DirCacheTime = time.Second * 30
	testSize := int64(524288000)

	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
	id := "tiuufo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -338,7 +337,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

func TestInternalWrappedWrittenContentMatches(t *testing.T) {
	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	if runInstance.rootIsCrypt {
		t.Skip("test skipped with crypt remote")

@@ -368,7 +367,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

func TestInternalLargeWrittenContentMatches(t *testing.T) {
	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	if runInstance.rootIsCrypt {
		t.Skip("test skipped with crypt remote")

@@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	// update in the wrapped fs
	originalSize, err := runInstance.size(t, rootFs, "data.bin")
	require.NoError(t, err)
	log.Printf("original size: %v", originalSize)
	fs.Logf(nil, "original size: %v", originalSize)

	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
	require.NoError(t, err)

@@ -417,7 +416,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	if runInstance.rootIsCrypt {
		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
		require.NoError(t, err)
		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
		expectedSize++ // FIXME newline gets in, likely test data issue
	} else {
		data2 = []byte("test content")
	}

@@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
	require.NoError(t, err)
	require.Equal(t, int64(len(data2)), o.Size())
	log.Printf("updated size: %v", len(data2))
	fs.Logf(nil, "updated size: %v", len(data2))

	// get a new instance from the cache
	if runInstance.wrappedIsExternal {

@@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
	err = runInstance.retryBlock(func() error {
		li, err := runInstance.list(t, rootFs, "test")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 2 {
			log.Printf("not expected listing /test: %v", li)
			fs.Logf(nil, "not expected listing /test: %v", li)
			return fmt.Errorf("not expected listing /test: %v", li)
		}

		li, err = runInstance.list(t, rootFs, "test/one")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 0 {
			log.Printf("not expected listing /test/one: %v", li)
			fs.Logf(nil, "not expected listing /test/one: %v", li)
			return fmt.Errorf("not expected listing /test/one: %v", li)
		}

		li, err = runInstance.list(t, rootFs, "test/second")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 1 {
			log.Printf("not expected listing /test/second: %v", li)
			fs.Logf(nil, "not expected listing /test/second: %v", li)
			return fmt.Errorf("not expected listing /test/second: %v", li)
		}
		if fi, ok := li[0].(os.FileInfo); ok {
			if fi.Name() != "data.bin" {
				log.Printf("not expected name: %v", fi.Name())
				fs.Logf(nil, "not expected name: %v", fi.Name())
				return fmt.Errorf("not expected name: %v", fi.Name())
			}
		} else if di, ok := li[0].(fs.DirEntry); ok {
			if di.Remote() != "test/second/data.bin" {
				log.Printf("not expected remote: %v", di.Remote())
				fs.Logf(nil, "not expected remote: %v", di.Remote())
				return fmt.Errorf("not expected remote: %v", di.Remote())
			}
		} else {
			log.Printf("unexpected listing: %v", li)
			fs.Logf(nil, "unexpected listing: %v", li)
			return fmt.Errorf("unexpected listing: %v", li)
		}

		log.Printf("complete listing: %v", li)
		fs.Logf(nil, "complete listing: %v", li)
		return nil
	}, 12, time.Second*10)
	require.NoError(t, err)
@@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
	err = runInstance.retryBlock(func() error {
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
		if !found {
			log.Printf("not found /test")
			fs.Logf(nil, "not found /test")
			return fmt.Errorf("not found /test")
		}
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
		if !found {
			log.Printf("not found /test/one")
			fs.Logf(nil, "not found /test/one")
			return fmt.Errorf("not found /test/one")
		}
		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
		if !found {
			log.Printf("not found /test/one/test2")
			fs.Logf(nil, "not found /test/one/test2")
			return fmt.Errorf("not found /test/one/test2")
		}
		li, err := runInstance.list(t, rootFs, "test/one")
		if err != nil {
			log.Printf("err: %v", err)
			fs.Logf(nil, "err: %v", err)
			return err
		}
		if len(li) != 1 {
			log.Printf("not expected listing /test/one: %v", li)
			fs.Logf(nil, "not expected listing /test/one: %v", li)
			return fmt.Errorf("not expected listing /test/one: %v", li)
		}
		if fi, ok := li[0].(os.FileInfo); ok {
			if fi.Name() != "test2" {
				log.Printf("not expected name: %v", fi.Name())
				fs.Logf(nil, "not expected name: %v", fi.Name())
				return fmt.Errorf("not expected name: %v", fi.Name())
			}
		} else if di, ok := li[0].(fs.DirEntry); ok {
			if di.Remote() != "test/one/test2" {
				log.Printf("not expected remote: %v", di.Remote())
				fs.Logf(nil, "not expected remote: %v", di.Remote())
				return fmt.Errorf("not expected remote: %v", di.Remote())
			}
		} else {
			log.Printf("unexpected listing: %v", li)
			fs.Logf(nil, "unexpected listing: %v", li)
			return fmt.Errorf("unexpected listing: %v", li)
		}
		log.Printf("complete listing /test/one/test2")
		fs.Logf(nil, "complete listing /test/one/test2")
		return nil
	}, 12, time.Second*10)
	require.NoError(t, err)

@@ -708,7 +707,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

func TestInternalExpiredEntriesRemoved(t *testing.T) {
	id := fmt.Sprintf("tieer%v", time.Now().Unix())
	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
	cfs, err := runInstance.getCacheFs(rootFs)
	require.NoError(t, err)

@@ -743,7 +742,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
}

func TestInternalBug2117(t *testing.T) {
	vfsflags.Opt.DirCacheTime = time.Second * 10
	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)

	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

@@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) {

	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 1)

	time.Sleep(time.Second * 30)

	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 1)

	di, err = runInstance.list(t, rootFs, "test/dir1")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 4)

	di, err = runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	log.Printf("len: %v", len(di))
	fs.Logf(nil, "len: %v", len(di))
	require.Len(t, di, 4)
}

@@ -829,7 +828,7 @@ func newRun() *run {
	} else {
		r.tmpUploadDir = uploadDir
	}
	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)

	return r
}

@@ -850,8 +849,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
	fstest.Initialise()
	remoteExists := false
	for _, s := range config.FileSections() {
		if s == remote {
	for _, s := range config.GetRemotes() {
		if s.Name == remote {
			remoteExists = true
		}
	}

@@ -875,12 +874,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
	cacheRemote := remote
	if !remoteExists {
		localRemote := remote + "-local"
		config.FileSet(localRemote, "type", "local")
		config.FileSet(localRemote, "nounc", "true")
		config.FileSetValue(localRemote, "type", "local")
		config.FileSetValue(localRemote, "nounc", "true")
		m.Set("type", "cache")
		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
	} else {
		remoteType := config.FileGet(remote, "type")
		remoteType := config.GetValue(remote, "type")
		if remoteType == "" {
			t.Skipf("skipped due to invalid remote type for %v", remote)
			return nil, nil

@@ -891,14 +890,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
		m.Set("password", cryptPassword1)
		m.Set("password2", cryptPassword2)
	}
	remoteRemote := config.FileGet(remote, "remote")
	remoteRemote := config.GetValue(remote, "remote")
	if remoteRemote == "" {
		t.Skipf("skipped due to invalid remote wrapper for %v", remote)
		return nil, nil
	}
	remoteRemoteParts := strings.Split(remoteRemote, ":")
	remoteWrapping := remoteRemoteParts[0]
	remoteType := config.FileGet(remoteWrapping, "type")
	remoteType := config.GetValue(remoteWrapping, "type")
	if remoteType != "cache" {
		t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
		return nil, nil

@@ -1192,7 +1191,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
	if r.rootIsCrypt {
		denominator := int64(65536 + 16)
		size = size - 32
		size -= 32
		quotient := size / denominator
		remainder := size % denominator
		return (quotient*65536 + remainder - 16)
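The vfsflags-to-vfscommon changes above also wrap plain time values in fs.Duration. A small sketch of why the explicit conversion becomes necessary once the option field uses a named duration type (the Duration type below is illustrative, mirroring the shape of rclone's fs.Duration):

package main

import (
	"fmt"
	"time"
)

// Duration stands in for a named option type based on time.Duration.
type Duration time.Duration

type options struct {
	DirCacheTime Duration
}

func main() {
	var opt options
	// opt.DirCacheTime = time.Second        // would no longer compile
	opt.DirCacheTime = Duration(time.Second) // explicit conversion required
	fmt.Println(time.Duration(opt.DirCacheTime)) // 1s
}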
2 backend/cache/cache_test.go (vendored)
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
		RemoteName: "TestCache:",
		NilObject:  (*cache.Object)(nil),
		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
	})
10 backend/cache/handle.go (vendored)
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

	// we align the start offset of the first chunk to a likely chunk in the storage
	chunkStart = chunkStart - offset
	chunkStart -= offset
	r.queueOffset(chunkStart)
	found := false

@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
	}
	r.queueOffset(chunkStart)

@@ -415,10 +415,8 @@ func (w *worker) run() {
				continue
			}
		}
	} else {
		if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
			continue
		}
	} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
		continue
	}

	chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
@@ -308,7 +308,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		root: rpath,
		opt:  *opt,
	}
	cache.PinUntilFinalized(f.base, f)
	f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

	if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {

@@ -326,9 +325,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
		if testErr == fs.ErrorIsFile {
			f.base = newBase
			err = testErr
			cache.PinUntilFinalized(f.base, f)
		}
	}
	cache.PinUntilFinalized(f.base, f)

	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {

@@ -988,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
		}
	}

	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
	if o.main == nil && len(o.chunks) == 0 {
		// Scanning hasn't found data chunks with conforming names.
		if f.useMeta || quickScan {
			// Metadata is required but absent and there are no chunks.
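The scanObject simplification above relies on len being defined for nil slices, which makes the explicit nil check redundant. A quick demonstration:

package main

import "fmt"

func main() {
	var chunks []string              // nil slice
	fmt.Println(chunks == nil)       // true
	fmt.Println(len(chunks))         // 0
	fmt.Println(len(chunks) == 0)    // same result as the old two-part test
}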
@@ -36,6 +36,7 @@ func TestIntegration(t *testing.T) {
			"GetTier",
			"SetTier",
			"Metadata",
			"SetMetadata",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
@@ -1119,6 +1119,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	do, ok := o.Object.(fs.SetMetadataer)
	if !ok {
		return fs.ErrorNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
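The SetMetadata methods added in this and the following hunks all follow the same optional-interface delegation pattern: type-assert the wrapped object against the optional interface and pass through if it is supported. A self-contained sketch of that pattern, with illustrative names standing in for rclone's fs types:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("optional feature not implemented")

// Metadata is a simplified stand-in for fs.Metadata.
type Metadata map[string]string

// SetMetadataer is the optional interface an underlying object may support.
type SetMetadataer interface {
	SetMetadata(ctx context.Context, metadata Metadata) error
}

// wrapper wraps an arbitrary inner object, as the wrapping backends do.
type wrapper struct {
	inner any
}

// SetMetadata delegates to the inner object if it supports the interface.
func (w *wrapper) SetMetadata(ctx context.Context, metadata Metadata) error {
	do, ok := w.inner.(SetMetadataer)
	if !ok {
		return errNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

type inner struct{ meta Metadata }

func (i *inner) SetMetadata(_ context.Context, m Metadata) error { i.meta = m; return nil }

func main() {
	w := &wrapper{inner: &inner{}}
	fmt.Println(w.SetMetadata(context.Background(), Metadata{"mtime": "2024-01-01"})) // <nil>
	fmt.Println((&wrapper{inner: struct{}{}}).SetMetadata(context.Background(), nil)) // not implemented
}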
@@ -38,6 +38,7 @@ import (
const (
	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
	chunkStreams     = 0       // Streams to use for reading

	bufferSize     = 8388608
	heuristicBytes = 1048576
@@ -1286,6 +1287,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	do, ok := o.Object.(fs.SetMetadataer)
	if !ok {
		return fs.ErrorNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {

@@ -1351,7 +1363,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
		}
	}
	// Get a chunkedreader for the wrapped object
	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
	// Get file handle
	var file io.Reader
	if offset != 0 {
@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
	for _, runeValue := range plaintext {
		dir += int(runeValue)
	}
	dir = dir % 256
	dir %= 256

	// We'll use this number to store in the result filename...
	var result bytes.Buffer

@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
			if pos >= 26 {
				pos -= 6
			}
			pos = pos - thisdir
			pos -= thisdir
			if pos < 0 {
				pos += 52
			}

@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
		// Zero out the bad block and continue
		for i := range (*fh.buf)[:n] {
			(*fh.buf)[i] = 0
			fh.buf[i] = 0
		}
	}
	fh.bufIndex = 0
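The deobfuscateSegment change above keeps the manual wrap-around rather than using the % operator directly. A sketch of why: Go's % keeps the sign of the dividend, so shifting a letter position back by the obfuscation offset can go negative and must be wrapped by hand (alphabet size 52 as in the hunk; unshift is an illustrative name):

package main

import "fmt"

// unshift moves a position back by dir, wrapping modulo the alphabet size.
func unshift(pos, dir, alphabet int) int {
	pos -= dir
	if pos < 0 {
		pos += alphabet
	}
	return pos
}

func main() {
	fmt.Println(unshift(3, 10, 52))  // 45, wrapped
	fmt.Println(unshift(30, 10, 52)) // 20, no wrap needed
	// Contrast with the % operator, which keeps the dividend's sign:
	fmt.Println((3 - 10) % 52) // -7
}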
@@ -1248,6 +1248,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	do, ok := o.Object.(fs.SetMetadataer)
	if !ok {
		return fs.ErrorNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

// MimeType returns the content type of the Object if
// known, or "" if not
//
@@ -151,6 +151,7 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
		{Bit: uint64(rwOff), Name: "off"},
		{Bit: uint64(rwRead), Name: "read"},
		{Bit: uint64(rwWrite), Name: "write"},
		{Bit: uint64(rwFailOK), Name: "failok"},
	}
}

@@ -160,6 +161,7 @@ type rwChoice = fs.Bits[rwChoices]
const (
	rwRead rwChoice = 1 << iota
	rwWrite
	rwFailOK
	rwOff rwChoice = 0
)

@@ -173,6 +175,9 @@ var rwExamples = fs.OptionExamples{{
}, {
	Value: rwWrite.String(),
	Help:  "Write the value only",
}, {
	Value: rwFailOK.String(),
	Help:  "If writing fails log errors only, don't fail the transfer",
}, {
	Value: (rwRead | rwWrite).String(),
	Help:  "Read and Write the value.",
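The failok choice added above slots into a bit-flag scheme where each option is a distinct power of two so values can be combined, with off as the zero value. A simplified sketch of that shape (a plain uint64 standing in for rclone's fs.Bits):

package main

import "fmt"

type rwChoice uint64

const (
	rwRead rwChoice = 1 << iota // 1
	rwWrite                     // 2
	rwFailOK                    // 4
	rwOff rwChoice = 0
)

// IsSet reports whether all bits in flag are set in b.
func (b rwChoice) IsSet(flag rwChoice) bool { return b&flag == flag }

func main() {
	opt := rwRead | rwWrite | rwFailOK
	fmt.Println(opt.IsSet(rwWrite))     // true
	fmt.Println(rwRead.IsSet(rwFailOK)) // false
}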
@@ -1747,10 +1752,9 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
	leaf = f.opt.Enc.FromStandardName(leaf)
	pathID = actualID(pathID)
	createInfo := &drive.File{
		Name:        leaf,
		Description: leaf,
		MimeType:    driveFolderType,
		Parents:     []string{pathID},
		Name:     leaf,
		MimeType: driveFolderType,
		Parents:  []string{pathID},
	}
	var updateMetadata updateMetadataFn
	if len(metadata) > 0 {

@@ -2215,7 +2219,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
			case in <- job:
			default:
				overflow = append(overflow, job)
				wg.Add(-1)
				wg.Done()
			}
		}

@@ -2430,7 +2434,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Name:         leaf,
		Description:  leaf,
		Parents:      []string{directoryID},
		ModifiedTime: modTime.Format(timeFormatOut),
	}

@@ -2830,7 +2833,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	// FIXME remove this when google fixes the problem!
	if isDoc {
		// A short sleep is needed here in order to make the
		// change effective, without it is is ignored. This is
		// change effective, without it is ignored. This is
		// probably some eventual consistency nastiness.
		sleepTime := 2 * time.Second
		fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)

@@ -3773,7 +3776,7 @@ file named "foo ' \.txt":

The result is a JSON array of matches, for example:

[
[
    {
        "createdTime": "2017-06-29T19:58:28.537Z",
        "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",

@@ -3789,7 +3792,7 @@ The result is a JSON array of matches, for example:
        "size": "311",
        "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
    }
]`,
]`,
}}

// Command the backend to run a named command

@@ -3962,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	return "", nil
@@ -551,9 +551,11 @@ func (f *Fs) InternalTestQuery(t *testing.T) {

		results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
		require.NoError(t, err)
		require.Len(t, results, 1)
		assert.Len(t, results[0].Id, 33)
		assert.Equal(t, results[0].Name, item)
		require.True(t, len(results) > 0)
		for _, result := range results {
			assert.True(t, len(result.Id) > 0)
			assert.Equal(t, result.Name, item)
		}
		parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
	}
})

@@ -564,7 +566,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
	// Check set up for filtering
	assert.True(t, f.Features().FilterAware)

	opt := &filter.Opt{}
	opt := &filter.Options{}
	err := opt.MaxAge.Set("1h")
	assert.NoError(t, err)
	flt, err := filter.NewFilter(opt)
@@ -152,7 +152,7 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to set permission: %v", err)
			fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
			errs.Add(err)
		}
	}

@@ -262,7 +262,7 @@ func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.La
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set owner: %w", err)
		return fmt.Errorf("failed to set labels: %w", err)
	}
	return nil
}

@@ -372,6 +372,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
	// shared drives.
	if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
		// Don't process permissions if there aren't any specifically set
		fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
		info.Permissions = nil
		info.PermissionIds = nil
	}

@@ -553,7 +554,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
		}
		// Can't set Owner on upload so need to set afterwards
		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
			return f.setOwner(ctx, info, v)
			err := f.setOwner(ctx, info, v)
			if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
				return nil
			}
			return err
		})
	case "permissions":
		if !f.opt.MetadataPermissions.IsSet(rwWrite) {

@@ -566,7 +572,13 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
		}
		// Can't set Permissions on upload so need to set afterwards
		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
			return f.setPermissions(ctx, info, perms)
			err := f.setPermissions(ctx, info, perms)
			if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
				// We've already logged the permissions errors individually here
				fs.Debugf(f, "Ignoring error as failok is set: %v", err)
				return nil
			}
			return err
		})
	case "labels":
		if !f.opt.MetadataLabels.IsSet(rwWrite) {

@@ -579,7 +591,12 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
		}
		// Can't set Labels on upload so need to set afterwards
		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
			return f.setLabels(ctx, info, labels)
			err := f.setLabels(ctx, info, labels)
			if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
				return nil
			}
			return err
		})
	case "folder-color-rgb":
		updateInfo.FolderColorRgb = v
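The three callback changes above share one idea: when the failok bit is set for a metadata category, a write error is logged and swallowed instead of failing the transfer. A sketch of that wrapper shape with illustrative names (the real code closes over rclone's drive.File and fs logging):

package main

import (
	"context"
	"errors"
	"fmt"
)

type callbackFn func(ctx context.Context) error

// withFailOK wraps fn so its error is logged and discarded when failOK
// is true, and propagated unchanged otherwise.
func withFailOK(fn callbackFn, failOK bool) callbackFn {
	return func(ctx context.Context) error {
		err := fn(ctx)
		if err != nil && failOK {
			fmt.Printf("Ignoring error as failok is set: %v\n", err)
			return nil
		}
		return err
	}
}

func main() {
	setOwner := func(ctx context.Context) error { return errors.New("insufficient permissions") }
	fmt.Println(withFailOK(setOwner, true)(context.Background()))  // <nil>
	fmt.Println(withFailOK(setOwner, false)(context.Background())) // insufficient permissions
}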
@@ -216,7 +216,10 @@ are supported.

Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.`,
shared folder.

See also --dropbox-root-namespace for an alternative way to work with shared
folders.`,
			Default:  false,
			Advanced: true,
		}, {

@@ -237,6 +240,11 @@ shared folder.`,
				encoder.EncodeDel |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8,
		}, {
			Name:     "root_namespace",
			Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
			Default:  "",
			Advanced: true,
		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
	})
}

@@ -253,6 +261,7 @@ type Options struct {
	AsyncBatch    bool                 `config:"async_batch"`
	PacerMinSleep fs.Duration          `config:"pacer_min_sleep"`
	Enc           encoder.MultiEncoder `config:"encoding"`
	RootNsid      string               `config:"root_namespace"`
}

// Fs represents a remote dropbox server

@@ -377,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	oldToken = strings.TrimSpace(oldToken)
	if ok && oldToken != "" && oldToken[0] != '{' {
		fs.Infof(name, "Converting token to new format")
		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
		if err != nil {
			return nil, fmt.Errorf("NewFS convert token: %w", err)

@@ -502,8 +511,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	f.features.Fill(ctx, f)

	// If root starts with / then use the actual root
	if strings.HasPrefix(root, "/") {
	if f.opt.RootNsid != "" {
		f.ns = f.opt.RootNsid
		fs.Debugf(f, "Overriding root namespace to %q", f.ns)
	} else if strings.HasPrefix(root, "/") {
		// If root starts with / then use the actual root
		var acc *users.FullAccount
		err = f.pacer.Call(func() (bool, error) {
			acc, err = f.users.GetCurrentAccount()
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
		return false, err // No such user
	case 186:
		return false, err // IP blocked?
	case 374:
	case 374, 412: // Flood detected seems to be #412 now
		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
		time.Sleep(30 * time.Second)
	default:

@@ -441,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	srcFs := srcObj.fs

	// Find current directory ID
	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// If it is in the correct directory, just rename it
	var url string
	if currentDirectoryID == directoryID {
		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
	if srcDirectoryID == dstDirectoryID {
		// No rename needed
		if srcLeaf == dstLeaf {
			return src, nil
		}
		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
		if err != nil {
			return nil, fmt.Errorf("couldn't rename file: %w", err)
		}

@@ -466,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		}
		url = resp.URLs[0].URL
	} else {
		folderID, err := strconv.Atoi(directoryID)
		dstFolderID, err := strconv.Atoi(dstDirectoryID)
		if err != nil {
			return nil, err
		}
		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
		rename := dstLeaf
		// No rename needed
		if srcLeaf == dstLeaf {
			rename = ""
		}
		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
		if err != nil {
			return nil, fmt.Errorf("couldn't move file: %w", err)
		}
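The Move rewrite above effectively distinguishes four cases from the source and destination directory IDs and leaf names. A sketch of the decision table (moveKind is an illustrative name; the real code calls renameFile and moveFile):

package main

import "fmt"

// moveKind classifies a move request by comparing directories and leaves.
func moveKind(srcDirID, dstDirID, srcLeaf, dstLeaf string) string {
	if srcDirID == dstDirID {
		if srcLeaf == dstLeaf {
			return "no-op" // already in place, return src unchanged
		}
		return "rename" // same directory, new name
	}
	if srcLeaf == dstLeaf {
		return "move" // new directory, keep name (empty rename param)
	}
	return "move+rename"
}

func main() {
	fmt.Println(moveKind("12", "12", "a.txt", "a.txt")) // no-op
	fmt.Println(moveKind("12", "12", "a.txt", "b.txt")) // rename
	fmt.Println(moveKind("12", "34", "a.txt", "a.txt")) // move
	fmt.Println(moveKind("12", "34", "a.txt", "b.txt")) // move+rename
}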
901 backend/filescom/filescom.go (new file)
@@ -0,0 +1,901 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	files_sdk "github.com/Files-com/files-sdk-go/v3"
	"github.com/Files-com/files-sdk-go/v3/bundle"
	"github.com/Files-com/files-sdk-go/v3/file"
	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
	"github.com/Files-com/files-sdk-go/v3/folder"
	"github.com/Files-com/files-sdk-go/v3/session"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
)

/*
Run of rclone info
stringNeedsEscaping = []rune{
	'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential

	folderNotEmpty = "processing-failure/folder-not-empty"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "filescom",
		Description: "Files.com",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name: "site",
				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
			}, {
				Name: "username",
				Help: "The username used to authenticate with Files.com.",
			}, {
				Name:       "password",
				Help:       "The password used to authenticate with Files.com.",
				IsPassword: true,
			}, {
				Name:      "api_key",
				Help:      "The API key used to authenticate with Files.com.",
				Advanced:  true,
				Sensitive: true,
			}, {
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.Display |
					encoder.EncodeBackSlash |
					encoder.EncodeRightSpace |
					encoder.EncodeRightCrLfHtVt |
					encoder.EncodeInvalidUtf8),
			}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Site     string               `config:"site"`
	Username string               `config:"username"`
	Password string               `config:"password"`
	APIKey   string               `config:"api_key"`
	Enc      encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote files.com server
type Fs struct {
	name            string                 // name of this remote
	root            string                 // the path we are working on
	opt             Options                // parsed options
	features        *fs.Features           // optional features
	fileClient      *file.Client           // the connection to the file API
	folderClient    *folder.Client         // the connection to the folder API
	migrationClient *file_migration.Client // the connection to the file migration API
	bundleClient    *bundle.Client         // the connection to the bundle API
	pacer           *fs.Pacer              // pacer for API calls
}

// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	size     int64     // size of the object
	crc32    string    // CRC32 of the object content
	md5      string    // MD5 of the object content
	mimeType string    // Content-Type of the object
	modTime  time.Time // modification time of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("files root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}

	if apiErr, ok := err.(files_sdk.ResponseError); ok {
		for _, e := range retryErrorCodes {
			if apiErr.HttpCode == e {
				fs.Debugf(nil, "Retrying API error %v", err)
				return true, err
			}
		}
	}

	return fserrors.ShouldRetry(err), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
	params := files_sdk.FileFindParams{
		Path: f.absPath(path),
	}

	var file files_sdk.File
	err = f.pacer.Call(func() (bool, error) {
		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	return &file, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	root = strings.Trim(root, "/")

	config, err := newClientConfig(ctx, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		fileClient:      &file.Client{Config: config},
		folderClient:    &folder.Client{Config: config},
		migrationClient: &file_migration.Client{Config: config},
		bundleClient:    &bundle.Client{Config: config},
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		CanHaveEmptyDirectories:  true,
		ReadMimeType:             true,
		DirModTimeUpdatesOnWrite: true,
	}).Fill(ctx, f)

	if f.root != "" {
		info, err := f.readMetaDataForPath(ctx, "")
		if err == nil && !info.IsDir() {
			f.root = path.Dir(f.root)
			if f.root == "." {
				f.root = ""
			}
			return f, fs.ErrorIsFile
		}
	}

	return f, err
}

func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
	if opt.Site != "" {
		if strings.Contains(opt.Site, ".") {
			config.EndpointOverride = opt.Site
		} else {
			config.Subdomain = opt.Site
		}

		_, err = url.ParseRequestURI(config.Endpoint())
		if err != nil {
			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
			return
		}
	}

	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))

	if opt.APIKey != "" {
		config.APIKey = opt.APIKey
		return
	}

	if opt.Username == "" {
		err = errors.New("username not found")
		return
	}
	if opt.Password == "" {
		err = errors.New("password not found")
		return
	}
	opt.Password, err = obscure.Reveal(opt.Password)
	if err != nil {
		return
	}

	sessionClient := session.Client{Config: config}
	params := files_sdk.SessionCreateParams{
		Username: opt.Username,
		Password: opt.Password,
	}

	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
	if err != nil {
		err = fmt.Errorf("couldn't create session: %w", err)
		return
	}

	config.SessionId = thisSession.Id
	return
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if file != nil {
		err = o.setMetaData(file)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	var it *folder.Iter
	params := files_sdk.FolderListForParams{
		Path: f.absPath(dir),
	}

	err = f.pacer.Call(func() (bool, error) {
		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't list files: %w", err)
	}

	for it.Next() {
		item := ptr(it.File())
		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
		remote = path.Join(dir, remote)
		if remote == dir {
			continue
		}

		if item.IsDir() {
			d := fs.NewDir(remote, item.ModTime())
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, item)
			if err != nil {
				return nil, err
			}
			entries = append(entries, o)
		}
	}
	err = it.Err()
	if files_sdk.IsNotExist(err) {
		return nil, fs.ErrorDirNotFound
	}
	return
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
	// Create the directory for the object if it doesn't exist
	err = f.mkParentDir(ctx, remote)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

func (f *Fs) mkdir(ctx context.Context, path string) error {
	if path == "" || path == "." {
		return nil
	}

	params := files_sdk.FolderCreateParams{
		Path:         path,
		MkdirParents: ptr(true),
	}

	err := f.pacer.Call(func() (bool, error) {
		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if files_sdk.IsExist(err) {
		return nil
	}
	return err
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.mkdir(ctx, f.absPath(dir))
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	o := Object{
		fs:     f,
		remote: dir,
	}
	return o.SetModTime(ctx, modTime)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	path := f.absPath(dir)
	if path == "" || path == "." {
		return errors.New("can't purge root directory")
	}

	params := files_sdk.FileDeleteParams{
		Path:      path,
		Recursive: ptr(!check),
	}

	err := f.pacer.Call(func() (bool, error) {
		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
		// Allow for eventual consistency deletion of child objects.
		if isFolderNotEmpty(err) {
			return true, err
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorDirNotFound
		} else if isFolderNotEmpty(err) {
			return fs.ErrorDirectoryNotEmpty
		}

		return fmt.Errorf("rmdir failed: %w", err)
	}
	return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err = srcObj.readMetaData(ctx)
	if err != nil {
		return
	}

	srcPath := srcObj.fs.absPath(srcObj.remote)
	dstPath := f.absPath(remote)
	if strings.EqualFold(srcPath, dstPath) {
		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, err = f.createObject(ctx, remote)
	if err != nil {
		return
	}

	// Copy the object
	params := files_sdk.FileCopyParams{
		Path:        srcPath,
		Destination: dstPath,
		Overwrite:   ptr(true),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}

	err = f.waitForAction(ctx, action, "copy")
	if err != nil {
		return
	}

	err = dstObj.SetModTime(ctx, srcObj.modTime)
	return
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
	// Move the object
	params := files_sdk.FileMoveParams{
		Path:        src.absPath(srcRemote),
		Destination: f.absPath(dstRemote),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	err = f.waitForAction(ctx, action, "move")
	if err != nil {
		return nil, err
	}

	info, err = f.readMetaDataForPath(ctx, dstRemote)
	return
}

func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
	var migration files_sdk.FileMigration
	err = f.pacer.Call(func() (bool, error) {
		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
			// noop
		}, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err == nil && migration.Status != "completed" {
		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
	}
	return
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = f.readMetaDataForPath(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, dstRemote)
	if err != nil {
		return
	}

	// Do the move
	_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
	return
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
	params := files_sdk.BundleCreateParams{
		Paths: []string{f.absPath(remote)},
	}
	if expire < fs.DurationOff {
		params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
	}

	var bundle files_sdk.Bundle
	err = f.pacer.Call(func() (bool, error) {
		bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})

	url = bundle.Url
	return
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet(hash.CRC32, hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	switch t {
	case hash.CRC32:
		if o.crc32 == "" {
			return "", nil
		}
		return fmt.Sprintf("%08s", o.crc32), nil
	case hash.MD5:
		return o.md5, nil
	}
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
	o.modTime = file.ModTime()

	if !file.IsDir() {
		o.size = file.Size
		o.crc32 = file.Crc32
		o.md5 = file.Md5
		o.mimeType = file.MimeType
	}

	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	file, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	if file.IsDir() {
		return fs.ErrorIsDir
	}
	return o.setMetaData(file)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	params := files_sdk.FileUpdateParams{
		Path:          o.fs.absPath(o.remote),
		ProvidedMtime: &modTime,
	}

	var file files_sdk.File
	err = o.fs.pacer.Call(func() (bool, error) {
		file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	return o.setMetaData(&file)
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Offset and Count for range download
	var offset, count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	params := files_sdk.FileDownloadParams{
		Path: o.fs.absPath(o.remote),
	}

	headers := &http.Header{}
	headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
	err = o.fs.pacer.Call(func() (bool, error) {
		_, err = o.fs.fileClient.Download(
			params,
			files_sdk.WithContext(ctx),
			files_sdk.RequestHeadersOption(headers),
			files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
				in = closer
				return err
			}),
		)
		return shouldRetry(ctx, err)
	})
	return
}

// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
return &t
|
||||
}
|
||||
|
||||
func isFolderNotEmpty(err error) bool {
|
||||
var re files_sdk.ResponseError
|
||||
ok := errors.As(err, &re)
|
||||
return ok && re.Type == folderNotEmpty
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
//
|
||||
// The new object may have been created if an error is returned.
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
uploadOpts := []file.UploadOption{
|
||||
file.UploadWithContext(ctx),
|
||||
file.UploadWithReader(in),
|
||||
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
||||
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Upload(uploadOpts...)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := files_sdk.FileDeleteParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
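The range handling in Open above is driven by rclone's standard open options. A minimal sketch of a caller reading just the first KiB of an object; the remote name and object path are hypothetical, and it assumes a files.com remote is already configured:

package main

import (
	"context"
	"fmt"
	"io"

	_ "github.com/rclone/rclone/backend/filescom" // register the backend
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	f, err := fs.NewFs(ctx, "TestFilesCom:bucket") // hypothetical configured remote
	if err != nil {
		panic(err)
	}
	o, err := f.NewObject(ctx, "file.bin") // hypothetical object
	if err != nil {
		panic(err)
	}
	// Ask for bytes 0..1023 only - Open above turns this into an HTTP
	// Range header ("bytes=0-1023") on the download request.
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		panic(err)
	}
	defer func() { _ = rc.Close() }()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("read", n, "bytes")
}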
backend/filescom/filescom_test.go (new file, 17 lines)

@@ -0,0 +1,17 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
backend/frostfs/frostfs.go (new file, 1585 lines)
File diff suppressed because it is too large
backend/frostfs/frostfs_test.go (new file, 16 lines)

@@ -0,0 +1,16 @@
package frostfs

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFrostFS:",
		NilObject:       (*Object)(nil),
		SkipInvalidUTF8: true,
	})
}
backend/frostfs/util.go (new file, 326 lines)

@@ -0,0 +1,326 @@
package frostfs

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	resolver "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/wallet"

	"github.com/rclone/rclone/fs"
)

type endpointInfo struct {
	Address  string
	Priority int
	Weight   float64
}

func publicReadWriteCCPRules() []chain.Rule {
	return []chain.Rule{
		{
			Status: chain.Allow, Actions: chain.Actions{
				Inverted: false,
				Names: []string{
					native.MethodPutObject,
					native.MethodGetObject,
					native.MethodHeadObject,
					native.MethodDeleteObject,
					native.MethodSearchObject,
					native.MethodRangeObject,
					native.MethodHashObject,
					native.MethodPatchObject,
				},
			}, Resources: chain.Resources{
				Inverted: false,
				Names:    []string{native.ResourceFormatRootObjects},
			}, Any: false},
	}
}

func privateCCPRules() []chain.Rule {
	rule := publicReadWriteCCPRules()
	// The same as public-read-write, except that only the owner is allowed to perform the listed actions
	rule[0].Condition = []chain.Condition{
		{
			Op:    chain.CondStringEquals,
			Kind:  chain.KindRequest,
			Key:   native.PropertyKeyActorRole,
			Value: native.PropertyValueContainerRoleOwner,
		},
	}
	return rule
}

func publicReadCCPRules() []chain.Rule {
	rule := privateCCPRules()
	// Add a rule that allows other users to perform reading actions.
	rule = append(rule, chain.Rule{
		Status: chain.Allow, Actions: chain.Actions{
			Inverted: false,
			Names: []string{
				native.MethodGetObject,
				native.MethodHeadObject,
				native.MethodRangeObject,
				native.MethodHashObject,
				native.MethodSearchObject,
			},
		}, Resources: chain.Resources{
			Inverted: false,
			Names:    []string{native.ResourceFormatRootObjects},
		}, Condition: []chain.Condition{
			{
				Op:    chain.CondStringEquals,
				Kind:  chain.KindRequest,
				Key:   native.PropertyKeyActorRole,
				Value: native.PropertyValueContainerRoleOthers,
			},
		}, Any: false})
	return rule
}

func parseContainerCreationPolicyString(policyString string) ([]chain.Rule, error) {
	switch policyString {
	case "private":
		return privateCCPRules(), nil
	case "public-read":
		return publicReadCCPRules(), nil
	case "public-read-write":
		return publicReadWriteCCPRules(), nil
	}
	return nil, fmt.Errorf("invalid container creation policy: %s", policyString)
}

func parseEndpoints(endpointParam string) ([]endpointInfo, error) {
	var err error
	expectedLength := -1 // to make sure all endpoints have the same format

	endpoints := strings.Split(strings.TrimSpace(endpointParam), " ")
	res := make([]endpointInfo, 0, len(endpoints))
	seen := make(map[string]struct{}, len(endpoints))

	for _, endpoint := range endpoints {
		endpointInfoSplit := strings.Split(endpoint, ",")
		address := endpointInfoSplit[0]

		if len(address) == 0 {
			continue
		}
		if _, ok := seen[address]; ok {
			return nil, fmt.Errorf("endpoint '%s' is already defined", address)
		}
		seen[address] = struct{}{}

		epInfo := endpointInfo{
			Address:  address,
			Priority: 1,
			Weight:   1,
		}

		if expectedLength == -1 {
			expectedLength = len(endpointInfoSplit)
		}

		if len(endpointInfoSplit) != expectedLength {
			return nil, fmt.Errorf("all endpoints must have the same format: '%s'", endpointParam)
		}

		switch len(endpointInfoSplit) {
		case 1:
		case 2:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		case 3:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}

			epInfo.Weight, err = parseWeight(endpointInfoSplit[2])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		default:
			return nil, fmt.Errorf("invalid endpoint format '%s'", endpoint)
		}

		res = append(res, epInfo)
	}

	return res, nil
}

func parsePriority(priorityStr string) (int, error) {
	priority, err := strconv.Atoi(priorityStr)
	if err != nil {
		return 0, fmt.Errorf("invalid priority '%s': %w", priorityStr, err)
	}
	if priority <= 0 {
		return 0, fmt.Errorf("priority must be positive '%s'", priorityStr)
	}

	return priority, nil
}

func parseWeight(weightStr string) (float64, error) {
	weight, err := strconv.ParseFloat(weightStr, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid weight '%s': %w", weightStr, err)
	}
	if weight <= 0 {
		return 0, fmt.Errorf("weight must be positive '%s'", weightStr)
	}

	return weight, nil
}

func createPool(ctx context.Context, key *keys.PrivateKey, cfg *Options) (*pool.Pool, error) {
	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	prm.SetNodeDialTimeout(time.Duration(cfg.FrostfsConnectionTimeout))
	prm.SetHealthcheckTimeout(time.Duration(cfg.FrostfsRequestTimeout))
	prm.SetClientRebalanceInterval(time.Duration(cfg.FrostfsRebalanceInterval))
	prm.SetSessionExpirationDuration(cfg.FrostfsSessionExpiration)

	nodes, err := getNodePoolParams(cfg.FrostfsEndpoint)
	if err != nil {
		return nil, err
	}
	for _, node := range nodes {
		prm.AddNode(node)
	}

	p, err := pool.NewPool(prm)
	if err != nil {
		return nil, fmt.Errorf("create pool: %w", err)
	}

	if err = p.Dial(ctx); err != nil {
		return nil, fmt.Errorf("dial pool: %w", err)
	}

	return p, nil
}

func getNodePoolParams(endpointParam string) ([]pool.NodeParam, error) {
	endpointInfos, err := parseEndpoints(endpointParam)
	if err != nil {
		return nil, fmt.Errorf("parse endpoints params: %w", err)
	}

	res := make([]pool.NodeParam, len(endpointInfos))
	for i, info := range endpointInfos {
		res[i] = pool.NewNodeParam(info.Priority, info.Address, info.Weight)
	}

	return res, nil
}

func createNNSResolver(cfg *Options) (*resolver.NNS, error) {
	if cfg.RPCEndpoint == "" {
		return nil, nil
	}

	var nns resolver.NNS
	if err := nns.Dial(cfg.RPCEndpoint); err != nil {
		return nil, fmt.Errorf("dial NNS resolver: %w", err)
	}

	return &nns, nil
}

func getAccount(cfg *Options) (*wallet.Account, error) {
	w, err := wallet.NewWalletFromFile(cfg.Wallet)
	if err != nil {
		return nil, err
	}

	addr := w.GetChangeAddress()
	if cfg.Address != "" {
		addr, err = flags.ParseAddress(cfg.Address)
		if err != nil {
			return nil, fmt.Errorf("invalid address")
		}
	}
	acc := w.GetAccount(addr)
	err = acc.Decrypt(cfg.Password, w.Scrypt)
	if err != nil {
		return nil, err
	}

	return acc, nil
}

func newAddress(cnrID cid.ID, objID oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnrID)
	addr.SetObject(objID)
	return addr
}

func formObject(own *user.ID, cnrID cid.ID, name string, header map[string]string) *object.Object {
	attributes := make([]object.Attribute, 0, 1+len(header))
	filename := object.NewAttribute()
	filename.SetKey(object.AttributeFileName)
	filename.SetValue(name)

	attributes = append(attributes, *filename)

	for key, val := range header {
		attr := object.NewAttribute()
		attr.SetKey(key)
		attr.SetValue(val)
		attributes = append(attributes, *attr)
	}

	obj := object.New()
	obj.SetOwnerID(*own)
	obj.SetContainerID(cnrID)
	obj.SetAttributes(attributes...)

	return obj
}

func newDir(cnrID cid.ID, cnr container.Container, defaultZone string) *fs.Dir {
	remote := cnrID.EncodeToString()
	timestamp := container.CreatedAt(cnr)

	if domain := container.ReadDomain(cnr); domain.Name() != "" {
		if defaultZone != domain.Zone() {
			remote = domain.Name() + "." + domain.Zone()
		} else {
			remote = domain.Name()
		}
	}

	dir := fs.NewDir(remote, timestamp)
	dir.SetID(cnrID.String())
	return dir
}

func getContainerNameAndZone(containerStr, defaultZone string) (cnrName string, cnrZone string) {
	defer func() {
		if len(cnrZone) == 0 {
			cnrZone = defaultZone
		}
	}()
	if idx := strings.Index(containerStr, "."); idx >= 0 {
		return containerStr[:idx], containerStr[idx+1:]
	}
	return containerStr, defaultZone
}
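parseEndpoints above accepts a space-separated list of address[,priority[,weight]] entries and requires every entry to use the same form. A minimal sketch of driving it from inside the package; the node addresses are placeholders:

// Hypothetical endpoints: the second node gets priority 2 and weight 0.5.
infos, err := parseEndpoints("s01.example.net:8080,1,1 s02.example.net:8080,2,0.5")
if err != nil {
	panic(err)
}
for _, info := range infos {
	fmt.Printf("%s priority=%d weight=%g\n", info.Address, info.Priority, info.Weight)
}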
backend/frostfs/util_test.go (new file, 205 lines)

@@ -0,0 +1,205 @@
package frostfs

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetZoneAndContainerNames(t *testing.T) {
	for i, tc := range []struct {
		cnrStr       string
		defZone      string
		expectedName string
		expectedZone string
	}{
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name.",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		}, {
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actualName, actualZone := getContainerNameAndZone(tc.cnrStr, tc.defZone)
			require.Equal(t, tc.expectedZone, actualZone)
			require.Equal(t, tc.expectedName, actualName)
		})
	}
}

func TestParseContainerCreationPolicy(t *testing.T) {
	for i, tc := range []struct {
		ACLString     string
		ExpectedError bool
	}{
		{
			ACLString:     "",
			ExpectedError: true,
		},
		{
			ACLString:     "public-ready",
			ExpectedError: true,
		},
		{
			ACLString:     "public-read",
			ExpectedError: false,
		},
		{
			ACLString:     "public-read-write",
			ExpectedError: false,
		},
		{
			ACLString:     "private",
			ExpectedError: false,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			rules, err := parseContainerCreationPolicyString(tc.ACLString)
			if tc.ExpectedError {
				require.Error(t, err)
				require.Nil(t, rules)
			} else {
				require.NoError(t, err)
				require.NotNil(t, rules)
			}
		})
	}
}

func TestParseEndpoints(t *testing.T) {
	for i, tc := range []struct {
		EndpointsParam string
		ExpectedError  bool
		ExpectedResult []endpointInfo
	}{
		{
			EndpointsParam: "s01.frostfs.devenv:8080",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 1,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2,3",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   3,
			}},
		},
		{
			EndpointsParam: " s01.frostfs.devenv:8080 s02.frostfs.devenv:8080 ",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 2,
					Weight:   1,
				},
				{
					Address:  "s03.frostfs.devenv:8080",
					Priority: 2,
					Weight:   9,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,-1,1",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,,",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,sd,sd",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,0",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080,1",
			ExpectedError:  true,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			res, err := parseEndpoints(tc.EndpointsParam)
			if tc.ExpectedError {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.ExpectedResult, res)
		})
	}
}
@@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
 			Default: false,
 		}, {
 			Name: "concurrency",
-			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
+			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
 
 Note that setting this is very likely to cause deadlocks so it should
 be used with care.
@@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
 So for |concurrency 3| you'd use |--checkers 2 --transfers 2
 --check-first| or |--checkers 1 --transfers 1|.
 
-`, "|", "`", -1),
+`, "|", "`"),
 			Default:  0,
 			Advanced: true,
 		}, {
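This is a pure modernization: since Go 1.12, strings.ReplaceAll(s, old, new) is defined as strings.Replace(s, old, new, -1), so the rendered help text is unchanged. A quick sketch of the equivalence:

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "use |--checkers| and |--transfers|"
	a := strings.Replace(s, "|", "`", -1) // n = -1 replaces every occurrence
	b := strings.ReplaceAll(s, "|", "`")  // same result, clearer intent
	fmt.Println(a == b)                   // true
}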
backend/gofile/api/types.go (new file, 311 lines)

@@ -0,0 +1,311 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// e.g. 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Error is returned from gofile when things go wrong
type Error struct {
	Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Status)
	return out
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
	if err != nil {
		return err
	}
	if e.IsError() {
		return e
	}
	return nil
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"`
	ModTime       int64                  `json:"modTime"`
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is the output from DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define:
//
//	For Attribute "name": the name of the content (file or folder)
//	For Attribute "description": the description displayed on the download page (folder only)
//	For Attribute "tags": a comma-separated list of tags (folder only)
//	For Attribute "public": either true or false (folder only)
//	For Attribute "expiry": a unix timestamp of the expiration date (folder only)
//	For Attribute "password": the password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
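Because timeFormat wraps time.RFC3339 in literal double quotes, Time formats and parses the surrounding JSON quotes itself rather than relying on the encoder. A minimal round-trip sketch; the timestamp is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/gofile/api"
)

func main() {
	t := api.Time(time.Date(2017, 5, 3, 7, 26, 10, 0, time.UTC))
	out, err := json.Marshal(&t) // MarshalJSON is defined on *Time
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "2017-05-03T07:26:10Z" - quotes come from timeFormat

	var back api.Time
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(back).Equal(time.Time(t))) // true
}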
backend/gofile/gofile.go (new file, 1646 lines)
File diff suppressed because it is too large
backend/gofile/gofile_test.go (new file, 17 lines)

@@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
@@ -697,7 +697,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		// is this a directory marker?
 		if isDirectory {
 			// Don't insert the root directory
-			if remote == directory {
+			if remote == f.opt.Enc.ToStandardPath(directory) {
 				continue
 			}
 			// process directory markers as directories
@@ -620,9 +620,7 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
 		if err != nil {
 			return err
 		}
-		if entry != nil {
-			entries = append(entries, entry)
-		}
+		entries = append(entries, entry)
 		return nil
 	})
 	if err != nil {
@@ -535,6 +535,17 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }
 
+// SetMetadata sets metadata for an Object
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+	do, ok := o.Object.(fs.SetMetadataer)
+	if !ok {
+		return fs.ErrorNotImplemented
+	}
+	return do.SetMetadata(ctx, metadata)
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
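This is rclone's usual optional-interface upgrade pattern: the wrapper only forwards SetMetadata when the wrapped object actually implements fs.SetMetadataer. A self-contained sketch of the pattern in isolation; the names here are illustrative, not rclone's:

package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("not implemented")

// Basic is the interface every object satisfies.
type Basic interface{ Name() string }

// Setter is an optional extension interface.
type Setter interface{ Set(key, value string) error }

// Wrapper embeds a Basic and forwards Set only when supported.
type Wrapper struct{ Basic }

func (w Wrapper) Set(key, value string) error {
	do, ok := w.Basic.(Setter) // upgrade to the optional interface
	if !ok {
		return errNotImplemented
	}
	return do.Set(key, value)
}

type plain struct{}

func (plain) Name() string { return "plain" }

func main() {
	w := Wrapper{plain{}}
	fmt.Println(w.Set("a", "b")) // "not implemented": plain lacks Set
}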
@@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
 	var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
 	var path = strings.Replace(resultURL, endpoint, "", 1)
 
-	path = path + expires
+	path += expires
 	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
 	mac.Write([]byte(path))
 	signature := hex.EncodeToString(mac.Sum(nil))
@@ -1487,16 +1487,38 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantMove
 	}
 
-	err := f.mkParentDir(ctx, remote)
+	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
 	if err != nil {
 		return nil, err
 	}
+
+	if err := f.mkParentDir(ctx, remote); err != nil {
+		return nil, err
+	}
 	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
 
-	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
-	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
-		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
-		info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
+	if err == nil {
+		var createTime time.Time
+		var createTimeMeta bool
+		var modTime time.Time
+		var modTimeMeta bool
+		if meta != nil {
+			createTime, createTimeMeta = srcObj.parseFsMetadataTime(meta, "btime")
+			if !createTimeMeta {
+				createTime = srcObj.createTime
+			}
+			modTime, modTimeMeta = srcObj.parseFsMetadataTime(meta, "mtime")
+			if !modTimeMeta {
+				modTime = srcObj.modTime
+			}
+		}
+		if bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
+			// Workaround necessary when destination was a trashed file, to avoid the copied file also being in trash (bug in api?)
+			fs.Debugf(src, "Server-side copied to trashed destination, restoring")
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		} else if createTimeMeta || modTimeMeta {
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		}
 	}
 
 	if err != nil {
@@ -1523,12 +1545,30 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantMove
 	}
 
-	err := f.mkParentDir(ctx, remote)
+	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
 	if err != nil {
 		return nil, err
 	}
+
+	if err := f.mkParentDir(ctx, remote); err != nil {
+		return nil, err
+	}
 	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
 
+	if err == nil && meta != nil {
+		createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
+		if !createTimeMeta {
+			createTime = srcObj.createTime
+		}
+		modTime, modTimeMeta := srcObj.parseFsMetadataTime(meta, "mtime")
+		if !modTimeMeta {
+			modTime = srcObj.modTime
+		}
+		if createTimeMeta || modTimeMeta {
+			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
+		}
+	}
+
 	if err != nil {
 		return nil, fmt.Errorf("couldn't move file: %w", err)
 	}
@@ -1786,6 +1826,20 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
 	return o.setMetaData(info)
 }
 
+// parseFsMetadataTime parses a time string from fs.Metadata with key
+func (o *Object) parseFsMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
+	value, ok := m[key]
+	if ok {
+		var err error
+		t, err = time.Parse(time.RFC3339Nano, value) // metadata stores RFC3339Nano timestamps
+		if err != nil {
+			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
+			ok = false
+		}
+	}
+	return t, ok
+}
+
 // ModTime returns the modification time of the object
 //
 // It attempts to read the objects mtime and if that isn't present the
@@ -1957,21 +2011,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var createdTime string
 	var modTime string
 	if meta != nil {
-		if v, ok := meta["btime"]; ok {
-			t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
-			if err != nil {
-				fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
-			} else {
-				createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
-			}
+		if t, ok := o.parseFsMetadataTime(meta, "btime"); ok {
+			createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
 		}
-		if v, ok := meta["mtime"]; ok {
-			t, err := time.Parse(time.RFC3339Nano, v)
-			if err != nil {
-				fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
-			} else {
-				modTime = api.Rfc3339Time(t).String()
-			}
+		if t, ok := o.parseFsMetadataTime(meta, "mtime"); ok {
+			modTime = api.Rfc3339Time(t).String()
 		}
 	}
 	if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
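The refactor above folds two near-identical parse blocks into parseFsMetadataTime, which treats an unparsable value the same as a missing key. A standalone sketch of that contract, using a plain function in place of the method:

package main

import (
	"fmt"
	"time"
)

// parseMetadataTime mirrors the contract of parseFsMetadataTime above:
// a missing key and an unparsable value both report ok == false.
func parseMetadataTime(m map[string]string, key string) (t time.Time, ok bool) {
	value, ok := m[key]
	if ok {
		var err error
		t, err = time.Parse(time.RFC3339Nano, value)
		if err != nil {
			ok = false
		}
	}
	return t, ok
}

func main() {
	meta := map[string]string{
		"mtime": "2024-05-03T07:26:10.123456789Z",
		"btime": "not-a-time",
	}
	if t, ok := parseMetadataTime(meta, "mtime"); ok {
		fmt.Println("mtime:", t)
	}
	_, ok := parseMetadataTime(meta, "btime")
	fmt.Println("btime ok:", ok) // false: unparsable value
}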
@@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 		//"utime" - read-only
 		//"content-type" - read-only
 	}
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()
backend/local/clone_darwin.go (new file, 93 lines)

@@ -0,0 +1,93 @@
//go:build darwin && cgo

// Package local provides a filesystem interface
package local

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"

	"github.com/go-darwin/apfs"
	"github.com/rclone/rclone/fs"
)

// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	if runtime.GOOS != "darwin" || f.opt.NoClone {
		return nil, fs.ErrorCantCopy
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't clone - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
		return nil, fs.ErrorCantCopy
	}

	// Fetch metadata if --metadata is in use
	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	if err != nil {
		return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
	}

	// Create destination
	dstObj := f.newObject(remote)
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}

	srcPath := srcObj.path
	if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
		srcPath, err = filepath.EvalSymlinks(srcPath)
		if err != nil {
			return nil, err
		}
	}

	err = Clone(srcPath, f.localPath(remote))
	if err != nil {
		return nil, err
	}

	// Set metadata if --metadata is in use
	if meta != nil {
		err = dstObj.writeMetadata(meta)
		if err != nil {
			return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
		}
	}

	return f.NewObject(ctx, remote)
}

// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
	state := apfs.CopyFileStateAlloc()
	defer func() {
		if err := apfs.CopyFileStateFree(state); err != nil {
			fs.Errorf(dst, "free state error: %v", err)
		}
	}()
	cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
	fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Copier = &Fs{}
)
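For comparison with the apfs.CopyFile path above, the closely related unix.Clonefile call mentioned in its comment looks roughly like this. A sketch only: unlike COPYFILE_CLONE, which falls back to a plain copy when cloning is unsupported, Clonefile simply fails (for example with ENOTSUP on non-APFS volumes):

//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Clone /tmp/src to /tmp/dst without following symlinks.
	// The paths are placeholders; dst must not already exist.
	err := unix.Clonefile("/tmp/src", "/tmp/dst", unix.CLONE_NOFOLLOW)
	if err != nil {
		fmt.Println("clone failed:", err)
		return
	}
	fmt.Println("cloned")
}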
backend/local/lchmod.go (new file, 16 lines)

@@ -0,0 +1,16 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// Can't do this safely on this OS - chmoding a symlink always
	// changes the destination.
	return nil
}
backend/local/lchmod_unix.go (new file, 41 lines)

@@ -0,0 +1,41 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
	// and returns ENOTSUP if you try, so we don't support this on linux
	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lChmod", Path: name, Err: e}
	}
	return nil
}
@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js
+//go:build plan9 || js
 
 package local
 
backend/local/lchtimes_windows.go (new file, 19 lines)

@@ -0,0 +1,19 @@
//go:build windows

package local

import (
	"time"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	return setTimes(name, atime, mtime, time.Time{}, true)
}
@ -32,9 +32,11 @@ import (
|
|||
)
|
||||
|
||||
// Constants
|
||||
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
const (
|
||||
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
)
|
||||
|
||||
// timeType allows the user to choose what exactly ModTime() returns
|
||||
type timeType = fs.Enum[timeTypeChoices]
|
||||
|
@ -78,41 +80,46 @@ supported by all file systems) under the "user.*" prefix.
|
|||
Metadata is supported on files and directories.
|
||||
`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Default: false,
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Default: false,
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
|
||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
||||
|
||||
|
@ -122,11 +129,12 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu
|
|||
|
||||
So rclone now always reads the link.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
|
||||
This flag can be used to normalize file names into unicode NFC form
|
||||
that are read from the local filesystem.
|
||||
|
@ -140,11 +148,12 @@ some OSes.
|
|||
|
||||
Note that rclone compares filenames with unicode normalization in the sync
|
||||
routine so this flag shouldn't normally be used.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy -
|
||||
|
@ -175,68 +184,96 @@ directory listing (where the initial stat value comes from on Windows)
|
|||
and when stat is called on them directly. Other copy tools always use
|
||||
the direct stat value and setting this flag will disable that.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_sensitive",
|
||||
Help: `Force the filesystem to report itself as case sensitive.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "case_sensitive",
|
||||
Help: `Force the filesystem to report itself as case sensitive.
|
||||
|
||||
Normally the local backend declares itself as case insensitive on
|
||||
Windows/macOS and case sensitive for everything else. Use this flag
|
||||
to override the default choice.`,
|
||||
Default: false,
|
||||
		Advanced: true,
	}, {
		Name: "case_insensitive",
		Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_clone",
		Help: `Disable reflink cloning for server-side copies.

Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.

Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.

Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.

Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_preallocate",
		Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_sparse",
		Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_set_modtime",
		Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "time_type",
		Help: `Set what kind of time is returned.

Normally rclone does all operations on the mtime or Modification time.

@@ -255,27 +292,29 @@ will silently replace it with the modification time which all OSes support.

Note that setting the time will still set the modified time so this is
only useful for reading.
`,
		Default:  mTime,
		Advanced: true,
		Examples: []fs.OptionExample{{
			Value: mTime.String(),
			Help:  "The last modification time.",
		}, {
			Value: aTime.String(),
			Help:  "The last access time.",
		}, {
			Value: bTime.String(),
			Help:  "The creation time.",
		}, {
			Value: cTime.String(),
			Help:  "The last status change time.",
		}},
	}, {
		Name:     config.ConfigEncoding,
		Help:     config.ConfigEncodingHelp,
		Advanced: true,
		Default:  encoder.OS,
	}},
	}
	fs.Register(fsi)
}
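As context for the no_clone help above: a minimal sketch of what a reflink clone looks like at the syscall level on macOS, using golang.org/x/sys/unix. This is illustrative only (it is not the code this changeset adds, and the file names are invented); it only builds on darwin:

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	func main() {
		// Clonefile creates clone.bin as a copy-on-write clone of
		// original.bin; on APFS both files share blocks until modified.
		if err := unix.Clonefile("original.bin", "clone.bin", 0); err != nil {
			fmt.Println("clone failed or unsupported:", err)
			return
		}
		fmt.Println("clone.bin now shares storage with original.bin")
	}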

@@ -296,6 +335,7 @@ type Options struct {
	NoSetModTime bool                 `config:"no_set_modtime"`
	TimeType     timeType             `config:"time_type"`
	Enc          encoder.MultiEncoder `config:"encoding"`
	NoClone      bool                 `config:"no_clone"`
}

// Fs represents a local filesystem rooted at root

@@ -384,6 +424,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}
	if opt.NoClone {
		// Disable server-side copy when --local-no-clone is set
		f.features.Copy = nil
	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)

@@ -1555,33 +1599,60 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
	return err
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	err := o.writeMetadata(metadata)
	if err != nil {
		return fmt.Errorf("SetMetadata failed on Object: %w", err)
	}
	// Re-read info now we have finished setting stuff
	return o.lstat()
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
	var vol string
	if runtime.GOOS == "windows" {
		s = filepath.ToSlash(s)
		vol = filepath.VolumeName(s)
		if vol == `\\?` && len(s) >= 6 {
			// `\\?\C:`
			vol = s[:6]
		}
		s = s[len(vol):]
	}
	// Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
	// Take care of the case Standard: ././‛. (the first dot means current directory)
	if enc != encoder.Standard {
		s = filepath.ToSlash(s)
		parts := strings.Split(s, "/")
		encoded := make([]string, len(parts))
		changed := false
		for i, p := range parts {
			if (p == ".") || (p == "..") {
				encoded[i] = p
				continue
			}
			part := enc.FromStandardName(p)
			changed = changed || part != p
			encoded[i] = part
		}
		if changed {
			s = strings.Join(encoded, "/")
		}
		s = filepath.FromSlash(s)
	}
	if runtime.GOOS == "windows" {
		s = vol + s
	}
	s2, err := filepath.Abs(s)
	if err == nil {
		s = s2
	}
	if !noUNC {
		// Convert to UNC. It does nothing on non windows platforms.
		s = file.UNCPath(s)
	}
	return s
}

@@ -1629,6 +1700,7 @@ var (
	_ fs.MkdirMetadataer = &Fs{}
	_ fs.Object          = &Object{}
	_ fs.Metadataer      = &Object{}
	_ fs.SetMetadataer   = &Object{}
	_ fs.Directory       = &Directory{}
	_ fs.SetModTimer     = &Directory{}
	_ fs.SetMetadataer   = &Directory{}

@@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.NoError(t, err)
}

// Test corrupted on transfer

@@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

	// Reupload it with different contents but same size and timestamp
	b := bytes.NewBufferString("CONTENT")
	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
	err = o.Update(ctx, b, src)
	require.NoError(t, err)

@@ -269,22 +268,66 @@ func TestMetadata(t *testing.T) {
	r := fstest.NewRun(t)
	const filePath = "metafile.txt"
	when := time.Now()
	r.WriteFile(filePath, "metadata file contents", when)
	f := r.Flocal.(*Fs)

	// Set fs into "-l" / "--links" mode
	f.opt.TranslateSymlinks = true

	// Write a symlink to the file
	symlinkPath := "metafile-link.txt"
	osSymlinkPath := filepath.Join(f.root, symlinkPath)
	symlinkPath += linkSuffix
	require.NoError(t, os.Symlink(filePath, osSymlinkPath))
	symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
	require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))

	// Get the object
	obj, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)
	o := obj.(*Object)

	// Get the symlink object
	symlinkObj, err := f.NewObject(ctx, symlinkPath)
	require.NoError(t, err)
	symlinkO := symlinkObj.(*Object)

	// Record metadata for o
	oMeta, err := o.Metadata(ctx)
	require.NoError(t, err)

	// Test symlink first to check it doesn't mess up file
	t.Run("Symlink", func(t *testing.T) {
		testMetadata(t, r, symlinkO, symlinkModTime)
	})

	// Read it again
	oMetaNew, err := o.Metadata(ctx)
	require.NoError(t, err)

	// Check that operating on the symlink didn't change the file it was pointing to
	// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
	assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")

	// Now run the same tests on the file
	t.Run("File", func(t *testing.T) {
		testMetadata(t, r, o, when)
	})
}

func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
	ctx := context.Background()
	whenRFC := when.Format(time.RFC3339Nano)
	const dayLength = len("2001-01-01")

	f := r.Flocal.(*Fs)
	features := f.Features()

	var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
	switch runtime.GOOS {
	case "darwin", "freebsd", "netbsd", "linux":
		hasXID, hasAtime, hasBtime = true, true, true
		canSetXattrOnLinks = runtime.GOOS != "linux"
	case "openbsd", "solaris":
		hasXID, hasAtime = true, true
	case "windows":

@@ -307,6 +350,10 @@ func TestMetadata(t *testing.T) {
	require.NoError(t, err)
	assert.Nil(t, m)

	if !canSetXattrOnLinks && o.translatedLink {
		t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
	}

	inM := fs.Metadata{
		"potato":  "chips",
		"cabbage": "soup",

@@ -321,18 +368,21 @@ func TestMetadata(t *testing.T) {
	})

	checkTime := func(m fs.Metadata, key string, when time.Time) {
		t.Helper()
		mt, ok := o.parseMetadataTime(m, key)
		assert.True(t, ok)
		dt := mt.Sub(when)
		precision := time.Second
		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
	}

	checkInt := func(m fs.Metadata, key string, base int) int {
		t.Helper()
		value, ok := o.parseMetadataInt(m, key, base)
		assert.True(t, ok)
		return value
	}

	t.Run("Read", func(t *testing.T) {
		m, err := o.Metadata(ctx)
		require.NoError(t, err)

@@ -342,13 +392,12 @@ func TestMetadata(t *testing.T) {
		checkInt(m, "mode", 8)
		checkTime(m, "mtime", when)

		assert.Equal(t, len(whenRFC), len(m["mtime"]))
		assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

		if hasAtime && !o.translatedLink { // symlinks generally don't record atime
			checkTime(m, "atime", when)
		}
		if hasBtime && !o.translatedLink { // symlinks generally don't record btime
			checkTime(m, "btime", when)
		}
		if hasXID {

@@ -372,6 +421,10 @@ func TestMetadata(t *testing.T) {
		"mode":   "0767",
		"potato": "wedges",
	}
	if !canSetXattrOnLinks && o.translatedLink {
		// Don't change xattr if not supported on symlinks
		delete(newM, "potato")
	}
	err := o.writeMetadata(newM)
	require.NoError(t, err)

@@ -381,7 +434,11 @@ func TestMetadata(t *testing.T) {

		mode := checkInt(m, "mode", 8)
		if runtime.GOOS != "windows" {
			expectedMode := 0767
			if o.translatedLink && runtime.GOOS == "linux" {
				expectedMode = 0777 // perms of symlinks always read as 0777 on linux
			}
			assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
		}

		checkTime(m, "mtime", newMtime)

@@ -391,11 +448,10 @@ func TestMetadata(t *testing.T) {
		if haveSetBTime {
			checkTime(m, "btime", newBtime)
		}
		if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
			assert.Equal(t, "wedges", m["potato"])
		}
	})
}

func TestFilter(t *testing.T) {

@@ -572,4 +628,35 @@ func TestCopySymlink(t *testing.T) {
	linkContents, err := os.Readlink(dstPath)
	require.NoError(t, err)
	assert.Equal(t, "file.txt", linkContents)

	// Set fs into "-L/--copy-links" mode
	f.opt.FollowSymlinks = true
	f.opt.TranslateSymlinks = false
	f.lstat = os.Stat

	// Create dst
	require.NoError(t, f.Mkdir(ctx, "dst2"))

	// Do copy from src into dst
	src, err = f.NewObject(ctx, "src/link.txt")
	require.NoError(t, err)
	require.NotNil(t, src)
	dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
	require.NoError(t, err)
	require.NotNil(t, dst)

	// Test that we made a NON-symlink and it has the right contents
	dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
	fi, err := os.Lstat(dstPath)
	require.NoError(t, err)
	assert.True(t, fi.Mode()&os.ModeSymlink == 0)
	want := fstest.NewItem("dst2/link.txt", "hello world", when)
	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")

	// Test that copying a normal file also works
	dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
	require.NoError(t, err)
	require.NotNil(t, dst)
	want = fstest.NewItem("dst2/file.txt", "hello world", when)
	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}

@@ -2,6 +2,7 @@ package local

import (
	"fmt"
	"math"
	"os"
	"runtime"
	"strconv"

@@ -72,12 +73,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
	value, ok := m[key]
	if ok {
		var err error
		parsed, err := strconv.ParseInt(value, base, 0)
		if err != nil {
			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
			ok = false
		}
		result = int(parsed)
	}
	return result, ok
}

@@ -104,7 +105,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	}
	if haveSetBTime {
		if btimeOK {
			if o.translatedLink {
				err = lsetBTime(o.path, btime)
			} else {
				err = setBTime(o.path, btime)
			}
			if err != nil {
				outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
			}

@@ -120,7 +125,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
		fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
	} else {
		if o.translatedLink {
			err = os.Lchown(o.path, uid, gid)
		} else {
			err = os.Chown(o.path, uid, gid)
		}
		if err != nil {
			outErr = fmt.Errorf("failed to change ownership: %w", err)
		}

@@ -128,9 +137,23 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	}
	mode, hasMode := o.parseMetadataInt(m, "mode", 8)
	if hasMode {
		if mode >= 0 {
			umode := uint(mode)
			if umode <= math.MaxUint32 {
				if o.translatedLink {
					if haveLChmod {
						err = lChmod(o.path, os.FileMode(umode))
					} else {
						fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
						err = nil
					}
				} else {
					err = os.Chmod(o.path, os.FileMode(umode))
				}
				if err != nil {
					outErr = fmt.Errorf("failed to change permissions: %w", err)
				}
			}
		}
	}
	// FIXME not parsing rdev yet

@@ -13,3 +13,9 @@ func setBTime(name string, btime time.Time) error {
	// Does nothing
	return nil
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
	// Does nothing
	return nil
}

@@ -9,15 +9,20 @@ import (

const haveSetBTime = true

// setTimes sets any of atime, mtime or btime
// if link is set it sets a link rather than the target
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
	pathp, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
	if link {
		fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
	}
	h, err := syscall.CreateFile(pathp,
		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
		syscall.OPEN_EXISTING, fileFlag, 0)
	if err != nil {
		return err
	}

@@ -27,6 +32,28 @@ func setBTime(name string, btime time.Time) (err error) {
			err = closeErr
		}
	}()
	var patime, pmtime, pbtime *syscall.Filetime
	if !atime.IsZero() {
		t := syscall.NsecToFiletime(atime.UnixNano())
		patime = &t
	}
	if !mtime.IsZero() {
		t := syscall.NsecToFiletime(mtime.UnixNano())
		pmtime = &t
	}
	if !btime.IsZero() {
		t := syscall.NsecToFiletime(btime.UnixNano())
		pbtime = &t
	}
	return syscall.SetFileTime(h, pbtime, patime, pmtime)
}

// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
	return setTimes(name, time.Time{}, time.Time{}, btime, false)
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
	return setTimes(name, time.Time{}, time.Time{}, btime, true)
}
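A hypothetical call of the setTimes helper above, for illustration only (path and timestamp invented): update just the birth time of a regular file, leaving atime and mtime alone by passing the zero time.Time, with link=false so the target rather than a reparse point is opened:

	// zero time.Time values mean "leave this timestamp unchanged"
	err := setTimes(`C:\temp\example.txt`, time.Time{}, time.Time{},
		time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), false)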

@@ -46,8 +46,8 @@ import (

// Global constants
const (
	minSleepPacer   = 100 * time.Millisecond
	maxSleepPacer   = 5 * time.Second
	decayConstPacer = 2       // bigger for slower decay, exponential
	metaExpirySec   = 20 * 60 // meta server expiration time
	serverExpirySec = 3 * 60  // download server expiration time
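For context, these constants feed rclone's pacer in the usual way — a sketch mirroring the pacer construction used elsewhere in this changeset (the pcloud NewFs below), not the mailru code verbatim; ctx is assumed to be in scope:

	// Start at 100ms between API calls and back off exponentially to at
	// most 5s when the server pushes back.
	p := fs.NewPacer(ctx, pacer.NewDefault(
		pacer.MinSleep(minSleepPacer),
		pacer.MaxSleep(maxSleepPacer),
		pacer.DecayConstant(decayConstPacer),
	))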

@@ -923,9 +923,7 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bool)
	entrywanted := (directory && files[i].Type == "dir") ||
		(!directory && files[i].Type != "dir")
	if entrywanted {
		files[0], files[i] = files[i], files[0]
	}
}
return files, nil

@@ -42,6 +42,8 @@ var _ error = (*Error)(nil)
type Identity struct {
	DisplayName string `json:"displayName,omitempty"`
	ID          string `json:"id,omitempty"`
	Email       string `json:"email,omitempty"`     // not officially documented, but seems to sometimes exist
	LoginName   string `json:"loginName,omitempty"` // SharePoint only
}

// IdentitySet is a keyed collection of Identity objects. It is used

@@ -51,6 +53,9 @@ type IdentitySet struct {
	User        Identity `json:"user,omitempty"`
	Application Identity `json:"application,omitempty"`
	Device      Identity `json:"device,omitempty"`
	Group       Identity `json:"group,omitempty"`
	SiteGroup   Identity `json:"siteGroup,omitempty"` // The SharePoint group associated with this action. Optional.
	SiteUser    Identity `json:"siteUser,omitempty"`  // The SharePoint user associated with this action. Optional.
}

// Quota groups storage space quota-related information on OneDrive into a single structure.

@@ -197,9 +202,14 @@ type SharingLinkType struct {
type LinkType string

const (
	// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
	ViewLinkType LinkType = "view"
	// EditLinkType (role: write) An edit sharing link, allowing read-write access.
	EditLinkType LinkType = "edit"
	// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
	// content into a host webpage. Embed links are not available for OneDrive for
	// Business or SharePoint.
	EmbedLinkType LinkType = "embed"
)

// LinkScope represents the scope of the link represented by this permission.

@@ -207,32 +217,41 @@ const (
type LinkScope string

const (
	// AnonymousScope = Anyone with the link has access, without needing to sign in.
	// This may include people outside of your organization.
	AnonymousScope LinkScope = "anonymous"
	// OrganizationScope = Anyone signed into your organization (tenant) can use the
	// link to get access. Only available in OneDrive for Business and SharePoint.
	OrganizationScope LinkScope = "organization"
)

// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
// Sharing permissions have a number of different forms. The Permission resource represents these different forms through facets on the resource.
type PermissionsType struct {
	ID                    string                 `json:"id"`                              // The unique identifier of the permission among all permissions on the item. Read-only.
	GrantedTo             *IdentitySet           `json:"grantedTo,omitempty"`             // For user type permissions, the details of the users & applications for this permission. Read-only. Deprecated on OneDrive Business only.
	GrantedToIdentities   []*IdentitySet         `json:"grantedToIdentities,omitempty"`   // For link type permissions, the details of the users to whom permission was granted. Read-only. Deprecated on OneDrive Business only.
	GrantedToV2           *IdentitySet           `json:"grantedToV2,omitempty"`           // For user type permissions, the details of the users & applications for this permission. Read-only. Not available for OneDrive Personal.
	GrantedToIdentitiesV2 []*IdentitySet         `json:"grantedToIdentitiesV2,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only. Not available for OneDrive Personal.
	Invitation            *SharingInvitationType `json:"invitation,omitempty"`            // Details of any associated sharing invitation for this permission. Read-only.
	InheritedFrom         *ItemReference         `json:"inheritedFrom,omitempty"`         // Provides a reference to the ancestor of the current permission, if it is inherited from an ancestor. Read-only.
	Link                  *SharingLinkType       `json:"link,omitempty"`                  // Provides the link details of the current permission, if it is a link type permissions. Read-only.
	Roles                 []Role                 `json:"roles,omitempty"`                 // The type of permission (read, write, owner, member). Read-only.
	ShareID               string                 `json:"shareId,omitempty"`               // A unique token that can be used to access this shared item via the shares API. Read-only.
}

// Role represents the type of permission (read, write, owner, member)
type Role string

const (
	// ReadRole provides the ability to read the metadata and contents of the item.
	ReadRole Role = "read"
	// WriteRole provides the ability to read and modify the metadata and contents of the item.
	WriteRole Role = "write"
	// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
	OwnerRole Role = "owner"
	// MemberRole represents the member role for SharePoint and OneDrive for Business.
	MemberRole Role = "member"
)

// PermissionsResponse is the response to the list permissions method

@@ -592,3 +611,25 @@ type SiteResource struct {
type SiteResponse struct {
	Sites []SiteResource `json:"value"`
}

// GetGrantedTo returns the GrantedTo property.
// This is to get around the odd problem of
// GrantedTo being deprecated on OneDrive Business, while
// GrantedToV2 is unavailable on OneDrive Personal.
func (p *PermissionsType) GetGrantedTo(driveType string) *IdentitySet {
	if driveType == "personal" {
		return p.GrantedTo
	}
	return p.GrantedToV2
}

// GetGrantedToIdentities returns the GrantedToIdentities property.
// This is to get around the odd problem of
// GrantedToIdentities being deprecated on OneDrive Business, while
// GrantedToIdentitiesV2 is unavailable on OneDrive Personal.
func (p *PermissionsType) GetGrantedToIdentities(driveType string) []*IdentitySet {
	if driveType == "personal" {
		return p.GrantedToIdentities
	}
	return p.GrantedToIdentitiesV2
}
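A hypothetical illustration of why the two accessors above exist (names and values invented, types from this file assumed in scope): callers ask by drive type and never touch the deprecated or missing field directly:

	p := &PermissionsType{
		GrantedToV2: &IdentitySet{User: Identity{DisplayName: "Alice"}},
	}
	who := p.GetGrantedTo("business") // business/SharePoint: reads GrantedToV2
	// p.GetGrantedTo("personal") would instead read the legacy GrantedTo field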

@@ -133,6 +133,7 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
		{Bit: uint64(rwOff), Name: "off"},
		{Bit: uint64(rwRead), Name: "read"},
		{Bit: uint64(rwWrite), Name: "write"},
		{Bit: uint64(rwFailOK), Name: "failok"},
	}
}

@@ -142,6 +143,7 @@ type rwChoice = fs.Bits[rwChoices]

const (
	rwRead rwChoice = 1 << iota
	rwWrite
	rwFailOK
	rwOff rwChoice = 0
)

@@ -158,6 +160,9 @@ var rwExamples = fs.OptionExamples{{
}, {
	Value: (rwRead | rwWrite).String(),
	Help:  "Read and Write the value.",
}, {
	Value: rwFailOK.String(),
	Help:  "If writing fails log errors only, don't fail the transfer",
}}
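A usage sketch for the new bit (the flag spelling is derived from the option name, so treat it as illustrative): a sync that should log rather than abort when permission writes are rejected can opt in with, e.g., --onedrive-metadata-permissions read,write,failok. In code, the bitset parses and tests the same way the backend does, assuming the definitions above:

	var c rwChoice
	_ = c.Set("read,write,failok")
	fmt.Println(c.IsSet(rwFailOK)) // true: write failures are logged, not fatal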

// Metadata describes metadata properties shared by both Objects and Directories

@@ -363,6 +368,15 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
	if m.normalizedID == "" {
		return errors.New("internal error: normalizedID is missing")
	}
	if m.fs.opt.MetadataPermissions.IsSet(rwFailOK) {
		// If failok is set, allow the permissions setting to fail and only log an ERROR
		defer func() {
			if err != nil {
				fs.Errorf(m.fs, "Ignoring error as failok is set: %v", err)
				err = nil
			}
		}()
	}

	// compare current to queued and sort into add/update/remove queues
	add, update, remove := m.sortPermissions()

@@ -396,7 +410,7 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
	if n.ID != "" {
		// sanity check: ensure there's a matching "old" id with a non-matching role
		if !slices.ContainsFunc(old, func(o *api.PermissionsType) bool {
			return o.ID == n.ID && slices.Compare(o.Roles, n.Roles) != 0 && len(o.Roles) > 0 && len(n.Roles) > 0 && !slices.Contains(o.Roles, api.OwnerRole)
		}) {
			fs.Debugf(m.remote, "skipping update for invalid roles: %v (perm ID: %v)", n.Roles, n.ID)
			continue

@@ -418,6 +432,10 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
		}
	}
	for _, o := range old {
		if slices.Contains(o.Roles, api.OwnerRole) {
			fs.Debugf(m.remote, "skipping remove permission -- can't remove 'owner' role")
			continue
		}
		newHasOld := slices.ContainsFunc(new, func(n *api.PermissionsType) bool {
			if n == nil || n.ID == "" {
				return false // can't remove perms without an ID

@@ -471,13 +489,13 @@ func (m *Metadata) processPermissions(ctx context.Context, add, update, remove []*api.PermissionsType) {
}

// fillRecipients looks for recipients to add from the permission passed in.
// It looks for an email address in identity.User.Email, ID, and DisplayName, otherwise it uses the identity.User.ID as r.ObjectID.
// It considers both "GrantedTo" and "GrantedToIdentities".
func fillRecipients(p *api.PermissionsType, driveType string) (recipients []api.DriveRecipient) {
	if p == nil {
		return recipients
	}
	ids := make(map[string]struct{}, len(p.GetGrantedToIdentities(driveType))+1)
	isUnique := func(s string) bool {
		_, ok := ids[s]
		return !ok && s != ""

@@ -487,7 +505,10 @@ func fillRecipients(p *api.PermissionsType, driveType string) (recipients []api.DriveRecipient) {
	r := api.DriveRecipient{}

	id := ""
	if strings.ContainsRune(identity.User.Email, '@') {
		id = identity.User.Email
		r.Email = id
	} else if strings.ContainsRune(identity.User.ID, '@') {
		id = identity.User.ID
		r.Email = id
	} else if strings.ContainsRune(identity.User.DisplayName, '@') {

@@ -503,12 +524,31 @@ func fillRecipients(p *api.PermissionsType, driveType string) (recipients []api.DriveRecipient) {
		ids[id] = struct{}{}
		recipients = append(recipients, r)
	}

	forIdentitySet := func(iSet *api.IdentitySet) {
		if iSet == nil {
			return
		}
		iS := *iSet
		forIdentity := func(i api.Identity) {
			if i != (api.Identity{}) {
				iS.User = i
				addRecipient(&iS)
			}
		}
		forIdentity(iS.User)
		forIdentity(iS.SiteUser)
		forIdentity(iS.Group)
		forIdentity(iS.SiteGroup)
		forIdentity(iS.Application)
		forIdentity(iS.Device)
	}

	for _, identitySet := range p.GetGrantedToIdentities(driveType) {
		forIdentitySet(identitySet)
	}
	forIdentitySet(p.GetGrantedTo(driveType))

	return recipients
}

@@ -518,7 +558,7 @@ func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (newPermissions []*api.PermissionsType, err error) {
	opts := m.fs.newOptsCall(m.normalizedID, "POST", "/invite")

	req := &api.AddPermissionsRequest{
		Recipients:    fillRecipients(p, m.fs.driveType),
		RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements
		Roles:         p.Roles,
	}

@@ -109,7 +109,8 @@ To update an existing permission, include both the Permission ID and the new
`roles` to be assigned. `roles` is the only property that can be changed.

To remove permissions, pass in a blob containing only the permissions you wish
to keep (which can be empty, to remove all.) Note that the `owner` role will be
ignored, as it cannot be removed.

Note that both reading and writing permissions requires extra API calls, so if
you don't need to read or write permissions it is recommended to omit

@@ -241,6 +241,18 @@ modification time and removes all but the last version.
	this flag there.
`,
	Advanced: true,
}, {
	Name: "hard_delete",
	Help: `Permanently delete files on removal.

Normally files will get sent to the recycle bin on deletion. Setting
this flag causes them to be permanently deleted. Use with care.

OneDrive personal accounts do not support the permanentDelete API,
it only applies to OneDrive for Business and SharePoint document libraries.
`,
	Advanced: true,
	Default:  false,
}, {
	Name:    "link_scope",
	Default: "anonymous",

@@ -695,6 +707,7 @@ type Options struct {
	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
	ListChunk               int64  `config:"list_chunk"`
	NoVersions              bool   `config:"no_versions"`
	HardDelete              bool   `config:"hard_delete"`
	LinkScope               string `config:"link_scope"`
	LinkType                string `config:"link_type"`
	LinkPassword            string `config:"link_password"`

@@ -814,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
		retry = true
		fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
	}
case 429, 503: // Too Many Requests, Server Too Busy
	// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
	if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
		retryAfter, parseErr := strconv.Atoi(values[0])

@@ -929,7 +942,8 @@ func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	// Redirects have no body so don't report an error
	if err != nil && resp.Header.Get("Location") == "" {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.ErrorInfo.Code == "" {

@@ -1479,7 +1493,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	var opts rest.Opts
	if f.opt.HardDelete {
		opts = f.newOptsCall(id, "POST", "/permanentDelete")
	} else {
		opts = f.newOptsCall(id, "DELETE", "")
	}
	opts.NoResponse = true

	return f.pacer.Call(func() (bool, error) {

@@ -1909,7 +1928,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	return shareURL, nil
}

const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
directURL := ""
segments := strings.Split(shareURL, "/")
switch f.driveType {

@@ -2520,6 +2539,9 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
	}
	// Set the mod time now and read metadata
	info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
	}
	return info, o.setMetaData(info)
}

@@ -50,7 +50,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
	file1 := r.WriteFile(randomFilename(), content, t2)

	// add a permission with "read" role
	permissions := defaultPermissions(f.driveType)
	permissions[0].Roles[0] = api.ReadRole
	expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
	f.compareMeta(t, expectedMeta, actualMeta, false)

@@ -59,7 +59,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
	found, num := false, 0
	foundCount := 0
	for i, p := range actualP {
		for _, identity := range p.GetGrantedToIdentities(f.driveType) {
			if identity.User.DisplayName == testUserID {
				// note: expected will always be element 0 here, but actual may be variable based on org settings
				assert.Equal(t, expectedP[0].Roles, p.Roles)

@@ -68,7 +68,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
			}
		}
		if f.driveType == driveTypePersonal {
			if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal
				assert.Equal(t, expectedP[0].Roles, p.Roles)
				found, num = true, i
				foundCount++

@@ -106,7 +106,7 @@ func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
	found = false
	var foundP *api.PermissionsType
	for _, p := range actualP {
		if p.GetGrantedTo(f.driveType) == nil || p.GetGrantedTo(f.driveType).User == (api.Identity{}) || p.GetGrantedTo(f.driveType).User.ID != testUserID {
			continue
		}
		found = true

@@ -134,7 +134,7 @@ func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) {
	// test that what we got before vs. after is the same
	_ = f.opt.MetadataPermissions.Set("read")
	_, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here
	permissions := defaultPermissions(f.driveType)
	_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
	if f.driveType == driveTypePersonal {
		perms, ok := actualMeta["permissions"]

@@ -150,7 +150,7 @@ func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) {
	ctx, ci := fs.AddConfig(ctx)
	ci.Metadata = true
	file1 := r.WriteFile(randomFilename(), "hello", t2)
	permissions := defaultPermissions(f.driveType)

	_ = f.opt.MetadataPermissions.Set("read,write")
	_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)

@@ -174,7 +174,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
	ctx, ci := fs.AddConfig(ctx)
	ci.Metadata = true
	_ = f.opt.MetadataPermissions.Set("read,write")
	permissions := defaultPermissions(f.driveType)
	permissions[0].Roles[0] = api.ReadRole

	expectedMeta := fs.Metadata{

@@ -288,7 +288,7 @@ func (f *Fs) TestServerSideCopyMove(t *testing.T, r *fstest.Run) {
	file1 := r.WriteFile(randomFilename(), content, t2)

	// add a permission with "read" role
	permissions := defaultPermissions(f.driveType)
	permissions[0].Roles[0] = api.ReadRole
	expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
	f.compareMeta(t, expectedMeta, actualMeta, false)

@@ -331,7 +331,10 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
	_ = f.opt.MetadataPermissions.Set("read,write")
	file1 := r.WriteFile(randomFilename(), content, t2)

	blob := `{"Metadata":{"permissions":"[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
	if f.driveType != driveTypePersonal {
		blob = `{"Metadata":{"permissions":"[{\"grantedToIdentitiesV2\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"}}`
	}

	// Copy
	ci.MetadataMapper = []string{"echo", blob}

@@ -347,7 +350,7 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
	found := false
	foundCount := 0
	for _, p := range actualP {
		for _, identity := range p.GetGrantedToIdentities(f.driveType) {
			if identity.User.DisplayName == testUserID {
				assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
				found = true

@@ -355,7 +358,7 @@ func (f *Fs) TestMetadataMapper(t *testing.T, r *fstest.Run) {
			}
		}
		if f.driveType == driveTypePersonal {
			if p.GetGrantedTo(f.driveType) != nil && p.GetGrantedTo(f.driveType).User != (api.Identity{}) && p.GetGrantedTo(f.driveType).User.ID == testUserID { // shows up in a different place on biz vs. personal
				assert.Equal(t, []api.Role{api.ReadRole}, p.Roles)
				found = true
				foundCount++

@@ -376,7 +379,7 @@ func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, perms []*api.PermissionsType) (fs.Metadata, fs.Metadata) {
	}

	expectedMeta.Set("permissions", marshalPerms(t, perms))
	obj := fstests.PutTestContentsMetadata(ctx, t, f, file, false, content, true, "plain/text", expectedMeta)
	do, ok := obj.(fs.Metadataer)
	require.True(t, ok)
	actualMeta, err := do.Metadata(ctx)

@@ -449,11 +452,18 @@ func indent(t *testing.T, s string) string {
	return marshalPerms(t, p)
}

func defaultPermissions(driveType string) []*api.PermissionsType {
	if driveType == driveTypePersonal {
		return []*api.PermissionsType{{
			GrantedTo:           &api.IdentitySet{User: api.Identity{}},
			GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
			Roles:               []api.Role{api.WriteRole},
		}}
	}
	return []*api.PermissionsType{{
		GrantedToV2:           &api.IdentitySet{User: api.Identity{}},
		GrantedToIdentitiesV2: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
		Roles:                 []api.Role{api.WriteRole},
	}}
}

@@ -26,7 +26,10 @@ package quickxorhash
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.

import (
	"crypto/subtle"
	"hash"
)

const (
	// BlockSize is the preferred size for hashing

@@ -48,6 +51,11 @@ func New() hash.Hash {
	return &quickXorHash{}
}

// xor dst with src
func xorBytes(dst, src []byte) int {
	return subtle.XORBytes(dst, src, dst)
}
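A minimal, self-contained demonstration of crypto/subtle.XORBytes (Go 1.20+), which the xorBytes helper above wraps — dst[i] ^= src[i] over the shorter length; the byte values are arbitrary examples:

	package main

	import (
		"crypto/subtle"
		"fmt"
	)

	func main() {
		dst := []byte{0xF0, 0x0F, 0xAA}
		src := []byte{0xFF, 0xFF, 0x55}
		// XOR src into dst in place; returns the number of bytes processed.
		n := subtle.XORBytes(dst, src, dst)
		fmt.Println(n, dst) // 3 [15 240 255]
	}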

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//

@@ -1,20 +0,0 @@
//go:build !go1.20

package quickxorhash

func xorBytes(dst, src []byte) int {
	n := len(dst)
	if len(src) < n {
		n = len(src)
	}
	if n == 0 {
		return 0
	}
	dst = dst[:n]
	//src = src[:n]
	src = src[:len(dst)] // remove bounds check in loop
	for i := range dst {
		dst[i] ^= src[i]
	}
	return n
}

@@ -1,9 +0,0 @@
//go:build go1.20

package quickxorhash

import "crypto/subtle"

func xorBytes(dst, src []byte) int {
	return subtle.XORBytes(dst, src, dst)
}

@@ -58,12 +58,10 @@ func populateSSECustomerKeys(opt *Options) error {
	sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
	if opt.SSECustomerKeySha256 == "" {
		opt.SSECustomerKeySha256 = sha256Checksum
	} else if opt.SSECustomerKeySha256 != sha256Checksum {
		return fmt.Errorf("the computed SHA256 checksum "+
			"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
			sha256Checksum, opt.SSECustomerKeySha256)
	}
	if opt.SSECustomerAlgorithm == "" {
		opt.SSECustomerAlgorithm = sseDefaultAlgorithm

@@ -148,7 +148,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker)
	}
	md5sumBinary := m.Sum([]byte{})
	w.addMd5(&md5sumBinary, int64(chunkNumber))
	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)

	// Object storage requires 1 <= PartNumber <= 10000
	ossPartNumber := chunkNumber + 1

@@ -279,7 +279,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
	if extend := end - int64(len(w.md5s)); extend > 0 {
		w.md5s = append(w.md5s, make([]byte, extend)...)
	}
	copy(w.md5s[start:end], (*md5binary))
}

func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {

@@ -109,6 +109,37 @@ type Hashes struct {
	SHA256 string `json:"sha256"`
}

// FileTruncateResponse is the response from /file_truncate
type FileTruncateResponse struct {
	Error
}

// FileCloseResponse is the response from /file_close
type FileCloseResponse struct {
	Error
}

// FileOpenResponse is the response from /file_open
type FileOpenResponse struct {
	Error
	Fileid         int64 `json:"fileid"`
	FileDescriptor int64 `json:"fd"`
}

// FileChecksumResponse is the response from /file_checksum
type FileChecksumResponse struct {
	Error
	MD5    string `json:"md5"`
	SHA1   string `json:"sha1"`
	SHA256 string `json:"sha256"`
}

// FilePWriteResponse is the response from /file_pwrite
type FilePWriteResponse struct {
	Error
	Bytes int64 `json:"bytes"`
}

// UploadFileResponse is the response from /uploadfile
type UploadFileResponse struct {
	Error

@@ -14,6 +14,7 @@ import (
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

@@ -146,7 +147,8 @@ we have to rely on user password authentication for it.`,
	Help:       "Your pcloud password.",
	IsPassword: true,
	Advanced:   true,
},
}...),
})
}

@@ -161,15 +163,16 @@ type Options struct {

// Fs represents a remote pcloud
type Fs struct {
	name         string                 // name of this remote
	root         string                 // the path we are working on
	opt          Options                // parsed options
	features     *fs.Features           // optional features
	ts           *oauthutil.TokenSource // the token source, used to create new clients
	srv          *rest.Client           // the connection to the server
	cleanupSrv   *rest.Client           // the connection used for the cleanup method
	dirCache     *dircache.DirCache     // Map of directory path to directory id
	pacer        *fs.Pacer              // pacer for API calls
	tokenRenewer *oauthutil.Renew       // renew the token on expiry
}

// Object describes a pcloud object

@@ -317,6 +320,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	name:  name,
	root:  root,
	opt:   *opt,
	ts:    ts,
	srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
	pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}

@@ -326,6 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	if !canCleanup {
		f.features.CleanUp = nil

@@ -333,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

@@ -375,6 +380,56 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return f, nil
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	client, err := f.newSingleConnClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("create client: %w", err)
	}
	// init an empty file
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, fmt.Errorf("resolve src: %w", err)
	}
	openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}

	writer := &writerAt{
		ctx:    ctx,
		client: client,
		fs:     f,
		size:   size,
		remote: remote,
		fd:     openResult.FileDescriptor,
		fileID: openResult.Fileid,
	}

	return writer, nil
}
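A hypothetical end-to-end use of the feature above (remote name and buffers invented; f, ctx, part1, and part2 assumed in scope): open a random-access writer, write two regions, then Close to flush and verify:

	w, err := f.OpenWriterAt(ctx, "backup/disk.img", int64(len(part1)+len(part2)))
	if err != nil {
		return err
	}
	if _, err := w.WriteAt(part1, 0); err != nil {
		return err
	}
	if _, err := w.WriteAt(part2, int64(len(part1))); err != nil {
		return err
	}
	return w.Close() // closes the pcloud fd, then polls until the size matches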

// Create a new http client, accepting keep-alive headers, limited to single connection.
// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection.
// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
	baseClient := fshttp.NewClient(ctx)
	baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
		t.MaxConnsPerHost = 1
		t.DisableKeepAlives = false
	})
	// Set our own http client in the context
	ctx = oauthutil.Context(ctx, baseClient)
	// create a new oauth client, re-use the token source
	oAuthClient := oauth2.NewClient(ctx, f.ts)
	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.

@@ -1094,9 +1149,42 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
	if err != nil {
		return err
	}
	fileID := fileIDtoNumber(o.id)
	filename = o.fs.opt.Enc.FromStandardName(filename)
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/copyfile",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fileid", fileID)
	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("toname", filename)
	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
	opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))

	result := &api.ItemResult{}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("update mtime: copyfile: %w", err)
	}
	if err := o.setMetaData(&result.Metadata); err != nil {
		return err
	}

	return nil
}
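The trick in the code above: pcloud's /copyfile endpoint copies a file onto its own name in the same folder while accepting new ctime/mtime parameters, so mod times can now be set without re-uploading the content. A hypothetical caller (remote name and timestamp invented; f and ctx assumed in scope):

	obj, _ := f.NewObject(ctx, "docs/report.txt")
	err := obj.SetModTime(ctx, time.Date(2021, 6, 1, 12, 0, 0, 0, time.UTC))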
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
|
|
216
backend/pcloud/writer_at.go
Normal file
216
backend/pcloud/writer_at.go
Normal file
|
@ -0,0 +1,216 @@
|
|||
package pcloud

import (
    "bytes"
    "context"
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "net/url"
    "strconv"
    "time"

    "github.com/rclone/rclone/backend/pcloud/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
    ctx    context.Context
    client *rest.Client
    fs     *Fs
    size   int64
    remote string
    fd     int64
    fileID int64
}

// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
    // close fd
    if _, err := c.fileClose(c.ctx); err != nil {
        return fmt.Errorf("close fd: %w", err)
    }

    // Avoiding race conditions: Depending on the TCP connection, there might be
    // caching issues when checking the size immediately after write.
    // Hence we try avoiding them by checking the resulting size on a different connection.
    if c.size < 0 {
        // Without knowing the size, we cannot do size checks.
        // Falling back to a sleep of 1s as a best effort.
        time.Sleep(1 * time.Second)
        return nil
    }
    sizeOk := false
    sizeLastSeen := int64(0)
    for retry := 0; retry < 5; retry++ {
        fs.Debugf(c.remote, "checking file size: try %d/5", retry)
        obj, err := c.fs.NewObject(c.ctx, c.remote)
        if err != nil {
            return fmt.Errorf("get uploaded obj: %w", err)
        }
        sizeLastSeen = obj.Size()
        if obj.Size() == c.size {
            sizeOk = true
            break
        }
        time.Sleep(1 * time.Second)
    }

    if !sizeOk {
        return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
    }

    return nil
}

// WriteAt implements io.WriterAt.
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
    contentLength := len(buffer)

    inSHA1Bytes := sha1.Sum(buffer)
    inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

    // get target hash
    outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
    if err != nil {
        return 0, err
    }
    outSHA1 := outChecksum.SHA1

    if outSHA1 == "" || inSHA1 == "" {
        return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1)
    }

    // check hash of buffer, skip upload if it already matches
    if inSHA1 == outSHA1 {
        return contentLength, nil
    }

    // upload buffer with offset if necessary
    if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
        return 0, err
    }

    return contentLength, nil
}

// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) {
    opts := rest.Opts{
        Method:           "PUT",
        Path:             "/file_open",
        Parameters:       url.Values{},
        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
        ExtraHeaders: map[string]string{
            "Connection": "keep-alive",
        },
    }
    filename = srcFs.opt.Enc.FromStandardName(filename)
    opts.Parameters.Set("name", filename)
    opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
    opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE

    result := &api.FileOpenResponse{}
    err := srcFs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := c.CallJSON(ctx, &opts, nil, result)
        err = result.Error.Update(err)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("open new file descriptor: %w", err)
    }
    return result, nil
}
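
The "0x0042" flags value above is the bitwise OR of two pCloud file_open flag bits, which the pCloud API documentation lists as O_WRITE (0x0002) and O_CREAT (0x0040). A small self-contained sketch of that arithmetic; the constant names are illustrative, not part of this backend:

package main

import "fmt"

// Illustrative names for the pCloud file_open flag bits used above.
const (
    pcloudOWrite = 0x0002 // open the file for writing
    pcloudOCreat = 0x0040 // create the file if it does not exist
)

func main() {
    // Prints "0x0042", the value passed as the "flags" parameter above.
    fmt.Printf("0x%04X\n", pcloudOWrite|pcloudOCreat)
}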

// Call pcloud file_checksum, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func (c *writerAt) fileChecksum(
    ctx context.Context,
    offset, count int64,
) (*api.FileChecksumResponse, error) {
    opts := rest.Opts{
        Method:           "PUT",
        Path:             "/file_checksum",
        Parameters:       url.Values{},
        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
        ExtraHeaders: map[string]string{
            "Connection": "keep-alive",
        },
    }
    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
    opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
    opts.Parameters.Set("count", strconv.FormatInt(count, 10))

    result := &api.FileChecksumResponse{}
    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
        err = result.Error.Update(err)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
    }
    return result, nil
}

// Call pcloud file_pwrite, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func (c *writerAt) filePWrite(
    ctx context.Context,
    offset int64,
    buf []byte,
) (*api.FilePWriteResponse, error) {
    contentLength := int64(len(buf))
    opts := rest.Opts{
        Method:           "PUT",
        Path:             "/file_pwrite",
        Body:             bytes.NewReader(buf),
        ContentLength:    &contentLength,
        Parameters:       url.Values{},
        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
        Close:            false,
        ExtraHeaders: map[string]string{
            "Connection": "keep-alive",
        },
    }
    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
    opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))

    result := &api.FilePWriteResponse{}
    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
        err = result.Error.Update(err)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
    }
    return result, nil
}

// Call pcloud file_close, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
    opts := rest.Opts{
        Method:           "PUT",
        Path:             "/file_close",
        Parameters:       url.Values{},
        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
        Close:            true,
    }
    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))

    result := &api.FileCloseResponse{}
    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
        err = result.Error.Update(err)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, fmt.Errorf("close file descriptor: %w", err)
    }
    return result, nil
}
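
Taken together, the four calls in this file implement pCloud's file-descriptor protocol: file_open yields an fd, file_pwrite writes byte ranges to it, file_checksum verifies them, and file_close releases it, all over the single keep-alive connection created by newSingleConnClient, since pCloud fds are bound to that connection. A hedged sketch of how a writerAt could be assembled from these pieces; the FileOpenResponse field names are assumed for illustration and error handling is trimmed:

// Sketch: wire up a writerAt over a single-connection client.
func (f *Fs) openWriterAt(ctx context.Context, directoryID, filename, remote string, size int64) (*writerAt, error) {
    client, err := f.newSingleConnClient(ctx) // fd calls must reuse this connection
    if err != nil {
        return nil, err
    }
    openResult, err := fileOpenNew(ctx, client, f, directoryID, filename)
    if err != nil {
        return nil, err
    }
    return &writerAt{
        ctx:    ctx,
        client: client,
        fs:     f,
        size:   size,
        remote: remote,
        fd:     openResult.FileDescriptor, // field names assumed
        fileID: openResult.Fileid,
    }, nil
}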
@@ -53,7 +53,7 @@ const (
    PhaseTypePending    = "PHASE_TYPE_PENDING"
    UploadTypeForm      = "UPLOAD_TYPE_FORM"
    UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
    ListLimit           = 100
    ListLimit           = 500
)

// ------------------------------------------------------------

@@ -156,6 +156,7 @@ type FileList struct {
    NextPageToken   string `json:"next_page_token"`
    Version         string `json:"version,omitempty"`
    VersionOutdated bool   `json:"version_outdated,omitempty"`
    SyncTime        Time   `json:"sync_time"`
}

// File is a basic element representing a single file object

@@ -165,17 +166,17 @@ type FileList struct {
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range-requests
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for meadia.
// However, it is not generally applicable as it is only for media.
type File struct {
    Apps          []*FileApp `json:"apps,omitempty"`
    Audit         *FileAudit `json:"audit,omitempty"`
    Collection    string     `json:"collection,omitempty"` // TODO
    CreatedTime   Time       `json:"created_time,omitempty"`
    DeleteTime    Time       `json:"delete_time,omitempty"`
    FileCategory  string     `json:"file_category,omitempty"`
    FileCategory  string     `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
    FileExtension string     `json:"file_extension,omitempty"`
    FolderType    string     `json:"folder_type,omitempty"`
    Hash          string     `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
    Hash          string     `json:"hash,omitempty"` // custom hash with a form of sha1sum
    IconLink      string     `json:"icon_link,omitempty"`
    ID            string     `json:"id,omitempty"`
    Kind          string     `json:"kind,omitempty"` // "drive#file"

@@ -191,11 +192,14 @@ type File struct {
    ParentID          string        `json:"parent_id,omitempty"`
    Phase             string        `json:"phase,omitempty"`
    Revision          int           `json:"revision,omitempty,string"`
    ReferenceEvents   []interface{} `json:"reference_events"`
    ReferenceResource interface{}   `json:"reference_resource"`
    Size              int64         `json:"size,omitempty,string"`
    SortName          string        `json:"sort_name,omitempty"`
    Space             string        `json:"space,omitempty"`
    SpellName         []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
    Starred           bool          `json:"starred,omitempty"`
    Tags              []interface{} `json:"tags"`
    ThumbnailLink     string        `json:"thumbnail_link,omitempty"`
    Trashed           bool          `json:"trashed,omitempty"`
    UserID            string        `json:"user_id,omitempty"`

@@ -241,15 +245,18 @@ type Media struct {
    IsOrigin       bool        `json:"is_origin,omitempty"`
    ResolutionName string      `json:"resolution_name,omitempty"`
    IsVisible      bool        `json:"is_visible,omitempty"`
    Category       string      `json:"category,omitempty"`
    Category       string      `json:"category,omitempty"` // "category_origin"
    Audio          interface{} `json:"audio"` // TODO: undiscovered yet
}

// FileParams includes parameters for instant open
type FileParams struct {
    DeviceID     string `json:"device_id,omitempty"`
    Duration     int64  `json:"duration,omitempty,string"` // in seconds
    Height       int    `json:"height,omitempty,string"`
    Platform     string `json:"platform,omitempty"` // "Upload"
    PlatformIcon string `json:"platform_icon,omitempty"`
    TaskID       string `json:"task_id"`
    URL          string `json:"url,omitempty"`
    Width        int    `json:"width,omitempty,string"`
}

@@ -395,6 +402,7 @@ type Quota struct {
    UsageInTrash   int64  `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
    PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
    PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
    IsUnlimited    bool   `json:"is_unlimited,omitempty"`
}

// Share is a response to RequestShare

@@ -478,7 +486,7 @@ type RequestNewFile struct {
    ParentID   string `json:"parent_id"`
    FolderType string `json:"folder_type"`
    // only when uploading a new file
    Hash       string            `json:"hash,omitempty"` // sha1sum
    Hash       string            `json:"hash,omitempty"` // gcid
    Resumable  map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
    Size       int64             `json:"size,omitempty"`
    UploadType string            `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
@@ -505,6 +513,72 @@ type RequestDecompress struct {
    DefaultParent bool   `json:"default_parent,omitempty"`
}

// ------------------------------------------------------------ authorization

// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
    CaptchaToken string `json:"captcha_token"`
    ExpiresIn    int64  `json:"expires_in"` // currently 300s
    // API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
    Expiry time.Time `json:"expiry,omitempty"`
    URL    string    `json:"url,omitempty"` // a link for users to solve captcha
}

// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
    if t.Expiry.IsZero() {
        return false
    }

    expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
    return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}

// Valid reports whether t is non-nil, has a CaptchaToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
    return t != nil && t.CaptchaToken != "" && !t.expired()
}
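
Because the API only reports expires_in, Expiry must be derived at retrieval time for expired() to do anything useful. A minimal sketch of that hand-off; the token value and 300s lifetime are illustrative:

// Sketch: populate Expiry right after receiving a token, then gate calls on Valid().
tok := &CaptchaToken{CaptchaToken: "ck0.illustrative", ExpiresIn: 300}
tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
if tok.Valid() {
    // safe to send as the x-captcha-token header for roughly the next 290s
}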

// CaptchaTokenRequest is a request for a captcha token
type CaptchaTokenRequest struct {
    Action       string            `json:"action,omitempty"`
    CaptchaToken string            `json:"captcha_token,omitempty"`
    ClientID     string            `json:"client_id,omitempty"`
    DeviceID     string            `json:"device_id,omitempty"`
    Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
}

// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
    CaptchaSign   string `json:"captcha_sign,omitempty"`
    ClientVersion string `json:"client_version,omitempty"`
    PackageName   string `json:"package_name,omitempty"`
    Timestamp     string `json:"timestamp,omitempty"`
    UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
    UserName      string `json:"username,omitempty"`
    Email         string `json:"email,omitempty"`
    PhoneNumber   string `json:"phone_number,omitempty"`
}

// Token represents an oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
    TokenType    string `json:"token_type"`
    AccessToken  string `json:"access_token"`
    RefreshToken string `json:"refresh_token"`
    ExpiresIn    int    `json:"expires_in"`
    Sub          string `json:"sub"`
}

// Expiry returns the expiry time derived from ExpiresIn, so it should be called on retrieval.
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
    if v := e.ExpiresIn; v != 0 {
        return time.Now().Add(time.Duration(v) * time.Second)
    }
    return
}

// ------------------------------------------------------------

// NOT implemented YET

@@ -3,21 +3,32 @@ package pikpak
import (
    "bytes"
    "context"
    "crypto/md5"
    "crypto/sha1"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "math/rand"
    "net/http"
    "net/url"
    "os"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/rclone/rclone/backend/pikpak/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/rest"
)

// Globals
const (
    cachePrefix = "rclone-pikpak-sha1sum-"
    cachePrefix = "rclone-pikpak-gcid-"
)

// requestDecompress requests decompress of compressed files

@@ -80,19 +91,21 @@ func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
    opts := rest.Opts{
        Method:     "POST",
        Path:       "/drive/v1/files:" + action,
        NoResponse: true, // Only returns `{"task_id":""}
        Method: "POST",
        Path:   "/drive/v1/files:" + action,
    }
    info := struct {
        TaskID string `json:"task_id"`
    }{}
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
        resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("batch action %q failed: %w", action, err)
    }
    return nil
    return f.waitTask(ctx, info.TaskID)
}

// requestNewTask requests a new api.NewTask and returns api.Task

@@ -141,12 +154,14 @@ func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error)
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
        if err == nil && info.Phase != api.PhaseTypeComplete {
            // could be pending right after file is created/uploaded.
            return true, errors.New("not PHASE_TYPE_COMPLETE")
        if err == nil && !info.Links.ApplicationOctetStream.Valid() {
            return true, errors.New("no link")
        }
        return f.shouldRetry(ctx, resp, err)
    })
    if err == nil {
        info.Name = f.opt.Enc.ToStandardName(info.Name)
    }
    return
}

@@ -167,6 +182,57 @@ func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api
    return
}

// getTask gets api.Task from API for the ID passed
func (f *Fs) getTask(ctx context.Context, ID string, checkPhase bool) (info *api.Task, err error) {
    opts := rest.Opts{
        Method: "GET",
        Path:   "/drive/v1/tasks/" + ID,
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
        if checkPhase {
            if err == nil && info.Phase != api.PhaseTypeComplete {
                // could be pending right after the task is created
                return true, fmt.Errorf("%s (%s) is still in %s", info.Name, info.Type, info.Phase)
            }
        }
        return f.shouldRetry(ctx, resp, err)
    })
    return
}

// waitTask waits for async tasks to be completed
func (f *Fs) waitTask(ctx context.Context, ID string) (err error) {
    time.Sleep(taskWaitTime)
    if info, err := f.getTask(ctx, ID, true); err != nil {
        if info == nil {
            return fmt.Errorf("can't verify the task is completed: %q", ID)
        }
        return fmt.Errorf("can't verify the task is completed: %#v", info)
    }
    return
}

// deleteTask removes a task having the specified ID
func (f *Fs) deleteTask(ctx context.Context, ID string, deleteFiles bool) (err error) {
    params := url.Values{}
    params.Set("delete_files", strconv.FormatBool(deleteFiles))
    params.Set("task_ids", ID)
    opts := rest.Opts{
        Method:     "DELETE",
        Path:       "/drive/v1/tasks",
        Parameters: params,
        NoResponse: true,
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, nil, nil)
        return f.shouldRetry(ctx, resp, err)
    })
    return
}

// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
    opts := rest.Opts{
@@ -195,16 +261,47 @@ func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api
    return
}

// Read the sha1 of in returning a reader which will read the same contents
// getGcid retrieves Gcid cached in API server
func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err error) {
    cid, err := calcCid(ctx, src)
    if err != nil {
        return
    }
    if src.Size() == 0 {
        // If src is zero-length, the API will return
        // Error "cid and file_size is required" (400)
        // In this case, we can simply return cid == gcid
        return cid, nil
    }

    params := url.Values{}
    params.Set("cid", cid)
    params.Set("file_size", strconv.FormatInt(src.Size(), 10))
    opts := rest.Opts{
        Method:     "GET",
        Path:       "/drive/v1/resource/cid",
        Parameters: params,
    }

    info := struct {
        Gcid string `json:"gcid,omitempty"`
    }{}
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return "", err
    }
    return info.Gcid, nil
}
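
getGcid is a shortcut: rather than reading the whole file, it fingerprints it with the cheap cid (at most three small samples, see calcCid below) and asks the API whether it already knows the full gcid for that cid/size pair. A hedged sketch of how an upload path might try the lookup before falling back to hashing locally with readGcid; this is a fragment and the surrounding variable names are assumed:

// Sketch: prefer the server-side gcid lookup, fall back to hashing locally.
gcid, err := f.getGcid(ctx, src)
if err != nil || gcid == "" {
    fs.Debugf(src, "calculating gcid locally: %v", err)
    var cleanup func()
    gcid, in, cleanup, err = readGcid(in, size, int64(f.opt.HashMemoryThreshold))
    defer cleanup()
}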

// Read the gcid of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
    // we need an SHA1
    hash := sha1.New()
    // use the teeReader to write to the local file AND calculate the SHA1 while doing so
    teeReader := io.TeeReader(in, hash)

func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader, cleanup func(), err error) {
    // nothing to clean up by default
    cleanup = func() {}

@@ -227,8 +324,11 @@ func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reade
        _ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
    }

    // copy the ENTIRE file to disc and calculate the SHA1 in the process
    if _, err = io.Copy(tempFile, teeReader); err != nil {
    // use the teeReader to write to the local file AND calculate the gcid while doing so
    teeReader := io.TeeReader(in, tempFile)

    // copy the ENTIRE file to disk and calculate the gcid in the process
    if gcid, err = calcGcid(teeReader, size); err != nil {
        return
    }
    // jump to the start of the local file so we can pass it along
@@ -239,15 +339,319 @@ func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reade
        // replace the already read source with a reader of our cached file
        out = tempFile
    } else {
        // that's a small file, just read it into memory
        var inData []byte
        inData, err = io.ReadAll(teeReader)
        if err != nil {
        buf := &bytes.Buffer{}
        teeReader := io.TeeReader(in, buf)

        if gcid, err = calcGcid(teeReader, size); err != nil {
            return
        }

        // set the reader to our read memory block
        out = bytes.NewReader(inData)
        out = buf
    }
    return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
    return
}

// calcGcid calculates Gcid from reader
//
// Gcid is a custom hash used to index a file's contents
func calcGcid(r io.Reader, size int64) (string, error) {
    calcBlockSize := func(j int64) int64 {
        var psize int64 = 0x40000
        for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
            psize <<= 1
        }
        return psize
    }

    totalHash := sha1.New()
    blockHash := sha1.New()
    readSize := calcBlockSize(size)
    for {
        blockHash.Reset()
        if n, err := io.CopyN(blockHash, r, readSize); err != nil && n == 0 {
            if err != io.EOF {
                return "", err
            }
            break
        }
        totalHash.Write(blockHash.Sum(nil))
    }
    return hex.EncodeToString(totalHash.Sum(nil)), nil
}
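
calcGcid is thus a SHA-1 over the concatenation of per-block SHA-1s, with the block size doubled from 256 KiB up to a 2 MiB cap so that no file needs more than about 512 blocks. A self-contained sketch of calling it on in-memory data, assuming calcGcid is in scope; the sample payload is illustrative:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    data := bytes.Repeat([]byte("rclone"), 100000) // ~600 KB of sample data
    gcid, err := calcGcid(bytes.NewReader(data), int64(len(data)))
    if err != nil {
        panic(err)
    }
    fmt.Println(gcid) // 40 hex chars: SHA-1 of the per-block SHA-1s
}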

// unWrapObjectInfo returns the underlying Object unwrapped as much as
// possible or nil even if it is an OverrideRemote
func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
    if o, ok := oi.(fs.Object); ok {
        return fs.UnWrapObject(o)
    } else if do, ok := oi.(*fs.OverrideRemote); ok {
        // Unwrap if it is an operations.OverrideRemote
        return do.UnWrap()
    }
    return nil
}

// calcCid calculates Cid from source
//
// Cid is a simplified version of Gcid
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
    srcObj := unWrapObjectInfo(src)
    if srcObj == nil {
        return "", fmt.Errorf("failed to unwrap object from src: %s", src)
    }

    size := src.Size()
    hash := sha1.New()
    var rc io.ReadCloser

    readHash := func(start, length int64) (err error) {
        end := start + length - 1
        if rc, err = srcObj.Open(ctx, &fs.RangeOption{Start: start, End: end}); err != nil {
            return fmt.Errorf("failed to open src with range (%d, %d): %w", start, end, err)
        }
        defer fs.CheckClose(rc, &err)
        _, err = io.Copy(hash, rc)
        return err
    }

    if size <= 0xF000 { // 61440 = 60KB
        err = readHash(0, size)
    } else { // 20KB from three different parts
        for _, start := range []int64{0, size / 3, size - 0x5000} {
            err = readHash(start, 0x5000)
            if err != nil {
                break
            }
        }
    }
    if err != nil {
        return "", fmt.Errorf("failed to hash: %w", err)
    }
    cid = strings.ToUpper(hex.EncodeToString(hash.Sum(nil)))
    return
}
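
For anything over 60 KB the cid hashes just three 20 KB (0x5000) windows: the start, one third of the way in, and the tail. A quick worked example of the byte ranges that touches; the 10 MiB size is arbitrary:

// Sketch: the three sampled ranges for a 10 MiB (10485760 byte) file.
size := int64(10 * 1024 * 1024)
for _, start := range []int64{0, size / 3, size - 0x5000} {
    fmt.Printf("hash bytes [%d, %d)\n", start, start+0x5000)
}
// hash bytes [0, 20480)
// hash bytes [3495253, 3515733)
// hash bytes [10465280, 10485760)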

// ------------------------------------------------------------ authorization

// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation
//
//	return "xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g, (e) => {
//		const t = (16 * Math.random()) | 0;
//		return ("x" == e ? t : (3 & t) | 8).toString(16);
//	});
func genDeviceID() string {
    base := []byte("xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx")
    for i, char := range base {
        switch char {
        case 'x':
            base[i] = fmt.Sprintf("%x", rand.Intn(16))[0]
        case 'y':
            base[i] = fmt.Sprintf("%x", rand.Intn(16)&3|8)[0]
        }
    }
    return string(base)
}
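
The template is a dash-less UUIDv4: hex digit 12 is pinned to '4' (the version nibble) and digit 16 to one of 8, 9, a or b (the variant bits). If cryptographic randomness were wanted, an equivalent generator could be sketched as below; this is an assumption-level alternative, not what the backend does, and it would import "crypto/rand" and "encoding/hex" rather than math/rand:

// Sketch: crypto/rand variant producing the same shape of device id.
func genDeviceIDSecure() (string, error) {
    b := make([]byte, 16)
    if _, err := rand.Read(b); err != nil { // crypto/rand's Read
        return "", err
    }
    b[6] = (b[6] & 0x0f) | 0x40 // hex digit 12 becomes '4'
    b[8] = (b[8] & 0x3f) | 0x80 // hex digit 16 becomes 8, 9, a or b
    return hex.EncodeToString(b), nil
}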

var md5Salt = []string{
    "C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
    "+r6CQVxjzJV6LCV",
    "F",
    "pFJRC",
    "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
    "/750aCr4lm/Sly/c",
    "RB+DT/gZCrbV",
    "",
    "CyLsf7hdkIRxRm215hl",
    "7xHvLi2tOYP0Y92b",
    "ZGTXXxu8E/MIWaEDB+Sm/",
    "1UI3",
    "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
    "ihtqpG6FMt65+Xk+tWUH2",
    "NhXXU9rg4XXdzo7u5o",
}

func md5Sum(text string) string {
    hash := md5.Sum([]byte(text))
    return hex.EncodeToString(hash[:])
}

func calcCaptchaSign(deviceID string) (timestamp, sign string) {
    timestamp = fmt.Sprint(time.Now().UnixMilli())
    str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
    for _, salt := range md5Salt {
        str = md5Sum(str + salt)
    }
    sign = "1." + str
    return
}

func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
    req = &api.CaptchaTokenRequest{
        Action:       action,
        CaptchaToken: oldToken, // can be empty initially
        ClientID:     clientID,
        DeviceID:     opt.DeviceID,
        Meta:         new(api.CaptchaTokenMeta),
    }
    switch action {
    case "POST:/v1/auth/signin":
        req.Meta.UserName = opt.Username
    default:
        timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
        req.Meta.CaptchaSign = captchaSign
        req.Meta.Timestamp = timestamp
        req.Meta.ClientVersion = clientVersion
        req.Meta.PackageName = packageName
        req.Meta.UserID = opt.UserID
    }
    return
}

// CaptchaTokenSource stores updated captcha tokens in the config file
type CaptchaTokenSource struct {
    mu    sync.Mutex
    m     configmap.Mapper
    opt   *Options
    token *api.CaptchaToken
    ctx   context.Context
    rst   *pikpakClient
}

// initialize CaptchaTokenSource from rclone.conf if possible
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
    token := new(api.CaptchaToken)
    tokenString, ok := m.Get("captcha_token")
    if !ok || tokenString == "" {
        fs.Debugf(nil, "failed to read captcha token out of config file")
    } else {
        if err := json.Unmarshal([]byte(tokenString), token); err != nil {
            fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
        }
    }
    return &CaptchaTokenSource{
        m:     m,
        opt:   opt,
        token: token,
        ctx:   ctx,
        rst:   newPikpakClient(getClient(ctx, opt), opt),
    }
}

// requestToken retrieves captcha token from API
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
    opts := rest.Opts{
        Method:  "POST",
        RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
    }
    var info *api.CaptchaToken
    _, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
    if err == nil && info.ExpiresIn != 0 {
        // populate to Expiry
        info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
        cts.token = info // update with a new one
    }
    return
}

func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
    oldToken := ""
    if cts.token != nil {
        oldToken = cts.token.CaptchaToken
    }
    action := "GET:/drive/v1/about"
    if opts.RootURL == "" && opts.Path != "" {
        action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
    } else if u, err := url.Parse(opts.RootURL); err == nil {
        action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
    }
    req := newCaptchaTokenRequest(action, oldToken, cts.opt)
    if err := cts.requestToken(cts.ctx, req); err != nil {
        return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
    }

    // put it into rclone.conf
    tokenBytes, err := json.Marshal(cts.token)
    if err != nil {
        return "", fmt.Errorf("failed to marshal captcha token: %w", err)
    }
    cts.m.Set("captcha_token", string(tokenBytes))
    return cts.token.CaptchaToken, nil
}

// Invalidate resets existing captcha token for a forced refresh
func (cts *CaptchaTokenSource) Invalidate() {
    cts.mu.Lock()
    cts.token.CaptchaToken = ""
    cts.mu.Unlock()
}

// Token returns a valid captcha token
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
    cts.mu.Lock()
    defer cts.mu.Unlock()
    if cts.token.Valid() {
        return cts.token.CaptchaToken, nil
    }
    return cts.refreshToken(opts)
}
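
The type is deliberately shaped like oauth2's TokenSource: callers just ask for a token per request, and caching, refresh, and persistence stay behind Token(). A minimal sketch of the consuming side, essentially what pikpakClient.CallJSON below does; this is a fragment:

// Sketch: gate an API call on a fresh captcha token.
token, err := cts.Token(&opts)
if err != nil {
    return fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %w", err))
}
if opts.ExtraHeaders == nil {
    opts.ExtraHeaders = make(map[string]string)
}
opts.ExtraHeaders["x-captcha-token"] = token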

// pikpakClient wraps rest.Client with a handle of captcha token
type pikpakClient struct {
    opt     *Options
    client  *rest.Client
    captcha *CaptchaTokenSource
}

// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
// * error handler
// * root url
// * default headers
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
    client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
    for key, val := range map[string]string{
        "Referer":          "https://mypikpak.com/",
        "x-client-id":      clientID,
        "x-client-version": clientVersion,
        "x-device-id":      opt.DeviceID,
        // "x-device-model": "firefox%2F129.0",
        // "x-device-name": "PC-Firefox",
        // "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
        // "x-net-work-type": "NONE",
        // "x-os-version": "Win32",
        // "x-platform-version": "1",
        // "x-protocol-version": "301",
        // "x-provider-name": "NONE",
        // "x-sdk-version": "8.0.3",
    } {
        client.SetHeader(key, val)
    }
    return &pikpakClient{
        client: client,
        opt:    opt,
    }
}

// This should be called right after the pikpakClient is initialized
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
    c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
    return c
}

func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
    if c.captcha != nil {
        token, err := c.captcha.Token(opts)
        if err != nil || token == "" {
            return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
        }
        if opts.ExtraHeaders == nil {
            opts.ExtraHeaders = make(map[string]string)
        }
        opts.ExtraHeaders["x-captcha-token"] = token
    }
    return c.client.CallJSON(ctx, opts, request, response)
}

func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
    return c.client.Call(ctx, opts)
}

@@ -7,8 +7,6 @@ package pikpak

// md5sum is not always available, sometimes given empty.

// sha1sum used for upload differs from the one with official apps.

// Trashed files are not restored to the original location when using `batchUntrash`

// Can't stream without `--vfs-cache-mode=full`

@@ -18,7 +16,6 @@ package pikpak
// ------------------------------------------------------------

// * List() with options starred-only
// * uploadByResumable() with configurable chunk-size
// * user-configurable list chunk
// * backend command: untrash, iscached
// * api(event,task)

@@ -26,6 +23,7 @@ package pikpak
import (
    "bytes"
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"

@@ -40,19 +38,23 @@ import (
    "sync"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "github.com/aws/aws-sdk-go-v2/aws"
    awsconfig "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/credentials"
    "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/rclone/rclone/backend/pikpak/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/chunksize"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/oauthutil"

@@ -64,12 +66,17 @@ import (

// Constants
const (
    rcloneClientID              = "YNxT9w7GMdWvEOKa"
    rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
    minSleep                    = 10 * time.Millisecond
    maxSleep                    = 2 * time.Second
    decayConstant               = 2 // bigger for slower decay, exponential
    rootURL                     = "https://api-drive.mypikpak.com"
    clientID                 = "YUMx5nI8ZU8Ap8pm"
    clientVersion            = "2.0.0"
    packageName              = "mypikpak.com"
    defaultUserAgent         = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
    minSleep                 = 100 * time.Millisecond
    maxSleep                 = 2 * time.Second
    taskWaitTime             = 500 * time.Millisecond
    decayConstant            = 2 // bigger for slower decay, exponential
    rootURL                  = "https://api-drive.mypikpak.com"
    minChunkSize             = fs.SizeSuffix(manager.MinUploadPartSize)
    defaultUploadConcurrency = manager.DefaultUploadConcurrency
)

// Globals

@@ -82,43 +89,53 @@ var (
        TokenURL:  "https://user.mypikpak.com/v1/auth/token",
        AuthStyle: oauth2.AuthStyleInParams,
    },
    ClientID:     rcloneClientID,
    ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
    RedirectURL:  oauthutil.RedirectURL,
    ClientID:    clientID,
    RedirectURL: oauthutil.RedirectURL,
}
)

// Returns OAuthOptions modified for pikpak
func pikpakOAuthOptions() []fs.Option {
    opts := []fs.Option{}
    for _, opt := range oauthutil.SharedOptions {
        if opt.Name == config.ConfigClientID {
            opt.Advanced = true
        } else if opt.Name == config.ConfigClientSecret {
            opt.Advanced = true
        }
        opts = append(opts, opt)
    }
    return opts
}

// pikpakAuthorize retrieves an OAuth token using user/pass and saves it to rclone.conf
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
    // override default client id/secret
    if id, ok := m.Get("client_id"); ok && id != "" {
        oauthConfig.ClientID = id
    }
    if secret, ok := m.Get("client_secret"); ok && secret != "" {
        oauthConfig.ClientSecret = secret
    if opt.Username == "" {
        return errors.New("no username")
    }
    pass, err := obscure.Reveal(opt.Password)
    if err != nil {
        return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
    }
    t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
    // new device id if necessary
    if len(opt.DeviceID) != 32 {
        opt.DeviceID = genDeviceID()
        m.Set("device_id", opt.DeviceID)
        fs.Infof(nil, "Using new device id %q", opt.DeviceID)
    }
    opts := rest.Opts{
        Method:  "POST",
        RootURL: "https://user.mypikpak.com/v1/auth/signin",
    }
    req := map[string]string{
        "username":  opt.Username,
        "password":  pass,
        "client_id": clientID,
    }
    var token api.Token
    rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
    _, err = rst.CallJSON(ctx, &opts, req, &token)
    if apiErr, ok := err.(*api.Error); ok {
        if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
            rst.captcha.Invalidate()
            _, err = rst.CallJSON(ctx, &opts, req, &token)
        }
    }
    if err != nil {
        return fmt.Errorf("failed to retrieve token using username/password: %w", err)
    }
    t := &oauth2.Token{
        AccessToken:  token.AccessToken,
        TokenType:    token.TokenType,
        RefreshToken: token.RefreshToken,
        Expiry:       token.Expiry(),
    }
    return oauthutil.PutToken(name, m, t, false)
}

@@ -157,7 +174,7 @@ func init() {
        }
        return nil, fmt.Errorf("unknown state %q", config.State)
    },
    Options: append(pikpakOAuthOptions(), []fs.Option{{
    Options: []fs.Option{{
        Name:     "user",
        Help:     "Pikpak username.",
        Required: true,

@@ -167,6 +184,18 @@ func init() {
        Help:       "Pikpak password.",
        Required:   true,
        IsPassword: true,
    }, {
        Name:      "device_id",
        Help:      "Device ID used for authorization.",
        Advanced:  true,
        Sensitive: true,
    }, {
        Name:     "user_agent",
        Default:  defaultUserAgent,
        Advanced: true,
        Help: fmt.Sprintf(`HTTP user agent for pikpak.

Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
    }, {
        Name: "root_folder_id",
        Help: `ID of the root folder.

@@ -191,6 +220,42 @@ Fill in for rclone to use a non root folder as its starting point.
        Help:     "Files bigger than this will be cached on disk to calculate hash if required.",
        Default:  fs.SizeSuffix(10 * 1024 * 1024),
        Advanced: true,
    }, {
        Name: "chunk_size",
        Help: `Chunk size for multipart uploads.

Large files will be uploaded in chunks of this size.

Note that this is stored in memory and there may be up to
"--transfers" * "--pikpak-upload-concurrency" chunks stored at once
in memory.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.`,
        Default:  minChunkSize,
        Advanced: true,
    }, {
        Name: "upload_concurrency",
        Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently for multipart uploads.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--pikpak-upload-concurrency" chunks stored at once
in memory.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
        Default:  defaultUploadConcurrency,
        Advanced: true,
    }, {
        Name: config.ConfigEncoding,
        Help: config.ConfigEncodingHelp,

@@ -209,7 +274,7 @@ Fill in for rclone to use a non root folder as its starting point.
            encoder.EncodeRightSpace |
            encoder.EncodeRightPeriod |
            encoder.EncodeInvalidUtf8),
    }}...),
    }},
    })
}

@@ -217,10 +282,15 @@ Fill in for rclone to use a non root folder as its starting point.
type Options struct {
    Username            string               `config:"user"`
    Password            string               `config:"pass"`
    UserID              string               `config:"user_id"` // only available during runtime
    DeviceID            string               `config:"device_id"`
    UserAgent           string               `config:"user_agent"`
    RootFolderID        string               `config:"root_folder_id"`
    UseTrash            bool                 `config:"use_trash"`
    TrashedOnly         bool                 `config:"trashed_only"`
    HashMemoryThreshold fs.SizeSuffix        `config:"hash_memory_limit"`
    ChunkSize           fs.SizeSuffix        `config:"chunk_size"`
    UploadConcurrency   int                  `config:"upload_concurrency"`
    Enc                 encoder.MultiEncoder `config:"encoding"`
}

@@ -230,7 +300,7 @@ type Fs struct {
    root         string             // the path we are working on
    opt          Options            // parsed options
    features     *fs.Features       // optional features
    rst          *rest.Client       // the connection to the server
    rst          *pikpakClient      // the connection to the server
    dirCache     *dircache.DirCache // Map of directory path to directory id
    pacer        *fs.Pacer          // pacer for API calls
    rootFolderID string             // the id of the root folder

@@ -249,6 +319,7 @@ type Object struct {
    modTime  time.Time // modification time of the object
    mimeType string    // The object MIME type
    parent   string    // ID of the parent directories
    gcid     string    // custom hash of the object
    md5sum   string    // md5sum of the object
    link     *api.Link // link to download the object
    linkMu   *sync.Mutex

@@ -386,6 +457,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
    } else if apiErr.Reason == "file_space_not_enough" {
        // "file_space_not_enough" (8): Storage space is not enough
        return false, fserrors.FatalError(err)
    } else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
        // "captcha_invalid" (9): Verification code is invalid
        // This error occurred on the POST:/drive/v1/files endpoint
        // when a zero-byte file was uploaded with an invalid captcha token
        f.rst.captcha.Invalidate()
        return true, err
    }
}

@@ -409,13 +486,36 @@ func errorHandler(resp *http.Response) error {
    return errResponse
}

// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
    // Override a few config settings and create a client
    newCtx, ci := fs.AddConfig(ctx)
    ci.UserAgent = opt.UserAgent
    return fshttp.NewClient(newCtx)
}

// newClientWithPacer sets a new http/rest client with a pacer to Fs
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
    f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
    var ts *oauthutil.TokenSource
    f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
    if err != nil {
        return fmt.Errorf("failed to create oauth client: %w", err)
    }
    f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
    token, err := ts.Token()
    if err != nil {
        return err
    }
    // parse user_id from oauth access token for later use
    if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
        jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
        info := struct {
            UserID string `json:"sub,omitempty"`
        }{}
        if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
            f.opt.UserID = info.UserID
        }
    }
    f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
    f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
    return nil
}
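
The user_id is pulled from the JWT access token: the payload is the second dot-separated segment, base64url-encoded without padding, and the "===" suffix above is a blunt attempt to satisfy the padded decoder. A sketch of a stricter variant that avoids guessing at padding by using RawURLEncoding; the helper name is illustrative:

// Sketch: extract the "sub" claim from a JWT payload.
func jwtSub(accessToken string) (string, error) {
    parts := strings.Split(accessToken, ".")
    if len(parts) < 2 {
        return "", errors.New("not a JWT")
    }
    // JWT segments are base64url without padding, which RawURLEncoding expects.
    payload, err := base64.RawURLEncoding.DecodeString(parts[1])
    if err != nil {
        return "", err
    }
    var claims struct {
        Sub string `json:"sub"`
    }
    if err := json.Unmarshal(payload, &claims); err != nil {
        return "", err
    }
    return claims.Sub, nil
}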

@@ -430,6 +530,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
    if err := configstruct.Set(m, opt); err != nil {
        return nil, err
    }
    if opt.ChunkSize < minChunkSize {
        return nil, fmt.Errorf("chunk size must be at least %s", minChunkSize)
    }

    root := parsePath(path)

@@ -446,7 +549,18 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
        NoMultiThreading: true, // can't have multiple threads downloading
    }).Fill(ctx, f)

    // new device id if necessary
    if len(f.opt.DeviceID) != 32 {
        f.opt.DeviceID = genDeviceID()
        m.Set("device_id", f.opt.DeviceID)
        fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
    }

    if err := f.newClientWithPacer(ctx); err != nil {
        // re-authorize if necessary
        if strings.Contains(err.Error(), "invalid_grant") {
            return f, f.reAuthorize(ctx)
        }
        return nil, err
    }

|
@ -872,19 +986,21 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||
// CleanUp empties the trash
|
||||
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/drive/v1/files/trash:empty",
|
||||
NoResponse: true, // Only returns `{"task_id":""}
|
||||
Method: "PATCH",
|
||||
Path: "/drive/v1/files/trash:empty",
|
||||
}
|
||||
info := struct {
|
||||
TaskID string `json:"task_id"`
|
||||
}{}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.Call(ctx, &opts)
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't empty trash: %w", err)
|
||||
}
|
||||
return nil
|
||||
return f.waitTask(ctx, info.TaskID)
|
||||
}
|
||||
|
||||
// Move the object
|
||||
|
@@ -970,6 +1086,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
    o = &Object{
        fs:      f,
        remote:  remote,
        parent:  dirID,
        size:    size,
        modTime: modTime,
        linkMu:  new(sync.Mutex),

@@ -1002,7 +1119,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, err
    }

    // Create temporary object
    // Create temporary object - still missing id, mimeType, gcid, md5sum
    dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
    if err != nil {
        return nil, err

@@ -1014,23 +1131,22 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
            return nil, err
        }
    }
    // Manually update info of moved object to save API calls
    dstObj.id = srcObj.id
    dstObj.mimeType = srcObj.mimeType
    dstObj.gcid = srcObj.gcid
    dstObj.md5sum = srcObj.md5sum
    dstObj.hasMetaData = true

    var info *api.File
    if srcLeaf != dstLeaf {
        // Rename
        info, err = f.renameObject(ctx, srcObj.id, dstLeaf)
        info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
        if err != nil {
            return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
        }
    } else {
        // Update info
        info, err = f.getFile(ctx, dstObj.id)
        if err != nil {
            return nil, fmt.Errorf("move: couldn't update moved file: %w", err)
        }
        return dstObj, dstObj.setMetaData(info)
    }
    return dstObj, dstObj.setMetaData(info)
    return dstObj, nil
}

// copy objects

@@ -1068,7 +1184,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, err
    }

    // Create temporary object
    // Create temporary object - still missing id, mimeType, gcid, md5sum
    dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
    if err != nil {
        return nil, err

@@ -1082,6 +1198,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
        return nil, fmt.Errorf("couldn't copy file: %w", err)
    }
    // Update info of the copied object with new parent but source name
    if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
        return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
    } else if err = dstObj.setMetaData(info); err != nil {
        return nil, err
    }

    // Can't copy and change name in one step so we have to check if we have
    // the correct name after copy

@@ -1096,16 +1218,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        if err != nil {
            return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
        }
        err = dstObj.setMetaData(info)
        if err != nil {
            return nil, err
        }
    } else {
        // Update info
        err = dstObj.readMetaData(ctx)
        if err != nil {
            return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
        }
        return dstObj, dstObj.setMetaData(info)
    }
    return dstObj, nil
}

@@ -1143,41 +1256,45 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
    return
}

func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, resumable *api.Resumable, options ...fs.OpenOption) (err error) {
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
    p := resumable.Params
    endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"

    cfg := &aws.Config{
        Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
        Region:      aws.String("pikpak"),
        Endpoint:    &endpoint,
    }
    sess, err := session.NewSession(cfg)
    // Create a credentials provider
    creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)

    cfg, err := awsconfig.LoadDefaultConfig(ctx,
        awsconfig.WithCredentialsProvider(creds),
        awsconfig.WithRegion("pikpak"))
    if err != nil {
        return
    }

    uploader := s3manager.NewUploader(sess)
    // Upload input parameters
    uParams := &s3manager.UploadInput{
    client := s3.NewFromConfig(cfg, func(o *s3.Options) {
        o.BaseEndpoint = aws.String("https://mypikpak.com/")
    })
    partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)

    // Create an uploader with custom options
    uploader := manager.NewUploader(client, func(u *manager.Uploader) {
        u.PartSize = int64(partSize)
        u.Concurrency = f.opt.UploadConcurrency
    })
    // Perform an upload
    _, err = uploader.Upload(ctx, &s3.PutObjectInput{
        Bucket: &p.Bucket,
        Key:    &p.Key,
        Body:   in,
    }
    // Perform upload with options different than the those in the Uploader.
    _, err = uploader.UploadWithContext(ctx, uParams, func(u *s3manager.Uploader) {
        // TODO can be user-configurable
        u.PartSize = 10 * 1024 * 1024 // 10MB part size
    })
    return
}
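
With SDK v2 the part size is no longer hard-coded at 10 MB: chunksize.Calculator grows it from the configured --pikpak-chunk-size until the part count for a known size fits under manager.MaxUploadParts (10,000). A rough sketch of that growth rule; the real logic lives in rclone's fs/chunksize package and may differ in detail:

// Sketch: grow the part size until size/partSize stays below maxParts.
func partSizeFor(size, defaultPartSize, maxParts int64) int64 {
    partSize := defaultPartSize
    for size/partSize >= maxParts {
        partSize *= 2
    }
    return partSize
}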
|
||||
func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str string, size int64, options ...fs.OpenOption) (*api.File, error) {
func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string, size int64, options ...fs.OpenOption) (info *api.File, err error) {
	// determine upload type
	uploadType := api.UploadTypeResumable
	if size >= 0 && size < int64(5*fs.Mebi) {
		uploadType = api.UploadTypeForm
	}
	// if size >= 0 && size < int64(5*fs.Mebi) {
	// 	uploadType = api.UploadTypeForm
	// }
	// stop using uploadByForm() because it is not as reliable as uploadByResumable() for a large number of small files

	// request upload ticket to API
	req := api.RequestNewFile{

@@ -1186,38 +1303,53 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
		ParentID:   parentIDForRequest(dirID),
		FolderType: "NORMAL",
		Size:       size,
		Hash:       strings.ToUpper(sha1Str),
		Hash:       strings.ToUpper(gcid),
		UploadType: uploadType,
	}
	if uploadType == api.UploadTypeResumable {
		req.Resumable = map[string]string{"provider": "PROVIDER_ALIYUN"}
	}
	newfile, err := f.requestNewFile(ctx, &req)
	new, err := f.requestNewFile(ctx, &req)
	if err != nil {
		return nil, fmt.Errorf("failed to create a new file: %w", err)
	}
	if newfile.File == nil {
		return nil, fmt.Errorf("invalid response: %+v", newfile)
	} else if newfile.File.Phase == api.PhaseTypeComplete {
	if new.File == nil {
		return nil, fmt.Errorf("invalid response: %+v", new)
	} else if new.File.Phase == api.PhaseTypeComplete {
		// early return; in case of zero-byte objects
		return newfile.File, nil
		if acc, ok := in.(*accounting.Account); ok && acc != nil {
			// if `in io.Reader` is still of type `*accounting.Account` (meaning that it is unused),
			// it is considered a server-side copy as no incoming/outgoing traffic occurs at all
			acc.ServerSideTransferStart()
			acc.ServerSideCopyEnd(size)
		}
		return new.File, nil
	}

	if uploadType == api.UploadTypeForm && newfile.Form != nil {
		err = f.uploadByForm(ctx, in, req.Name, size, newfile.Form, options...)
	} else if uploadType == api.UploadTypeResumable && newfile.Resumable != nil {
		err = f.uploadByResumable(ctx, in, newfile.Resumable, options...)
	defer atexit.OnError(&err, func() {
		fs.Debugf(leaf, "canceling upload: %v", err)
		if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
		}
		if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
		}
		fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
		time.Sleep(taskWaitTime)
	})()

	if uploadType == api.UploadTypeForm && new.Form != nil {
		err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
	} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
		err = f.uploadByResumable(ctx, in, leaf, size, new.Resumable)
	} else {
		return nil, fmt.Errorf("unable to proceed upload: %+v", newfile)
		err = fmt.Errorf("no method available for uploading: %+v", new)
	}

	if err != nil {
		return nil, fmt.Errorf("failed to upload: %w", err)
	}
	// refresh uploaded file info
	// Compared to `newfile.File` this upgrades several fields...
	// audit, links, modified_time, phase, revision, and web_content_link
	return f.getFile(ctx, newfile.File.ID)
	return new.File, f.waitTask(ctx, new.Task.ID)
}
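The `defer atexit.OnError(&err, func() { ... })()` registered above runs only when the upload ultimately fails, deleting the half-created remote file and task. A minimal, dependency-free sketch of that cancel-on-error pattern (the function and names below are illustrative, not rclone's API):

package main

import "fmt"

// uploadWithCleanup registers a rollback up front; the deferred closure
// inspects the named return err and only cancels when the upload failed.
func uploadWithCleanup(doUpload func() error, cancel func()) (err error) {
	defer func() {
		if err != nil {
			cancel() // roll back the partially created remote state
		}
	}()
	err = doUpload()
	return err
}

func main() {
	err := uploadWithCleanup(
		func() error { return fmt.Errorf("simulated transfer failure") },
		func() { fmt.Println("canceling upload") },
	)
	fmt.Println("result:", err)
}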

// Put the object

@@ -1441,6 +1573,7 @@ func (o *Object) setMetaData(info *api.File) (err error) {
	} else {
		o.parent = info.ParentID
	}
	o.gcid = info.Hash
	o.md5sum = info.Md5Checksum
	if info.Links.ApplicationOctetStream != nil {
		o.link = info.Links.ApplicationOctetStream
@@ -1470,22 +1603,11 @@ func (o *Object) setMetaDataWithLink(ctx context.Context) error {
		return nil
	}

	// fetch download link with retry scheme
	// 1 initial attempt and 2 retries are reasonable based on empirical analysis
	retries := 2
	for i := 1; i <= retries+1; i++ {
		info, err := o.fs.getFile(ctx, o.id)
		if err != nil {
			return fmt.Errorf("can't fetch download link: %w", err)
		}
		if err = o.setMetaData(info); err == nil && o.link.Valid() {
			return nil
		}
		if i <= retries {
			time.Sleep(time.Duration(200*i) * time.Millisecond)
		}
	info, err := o.fs.getFile(ctx, o.id)
	if err != nil {
		return err
	}
	return errors.New("can't download - no link to download")
	return o.setMetaData(info)
}

// readMetaData gets the metadata if it hasn't already been fetched

@@ -1525,9 +1647,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	if o.md5sum == "" {
		return "", nil
	}
	return strings.ToLower(o.md5sum), nil
}

@@ -1619,14 +1738,14 @@ func (o *Object) open(ctx context.Context, url string, options ...fs.OpenOption)
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.id == "" {
		return nil, errors.New("can't download - no id")
		return nil, errors.New("can't download: no id")
	}
	if o.size == 0 {
		// zero-byte objects may have no download link
		return io.NopCloser(bytes.NewBuffer([]byte(nil))), nil
	}
	if err = o.setMetaDataWithLink(ctx); err != nil {
		return nil, err
		return nil, fmt.Errorf("can't download: %w", err)
	}
	return o.open(ctx, o.link.URL, options...)
}

@@ -1651,25 +1770,34 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
		return err
	}

	// Calculate sha1sum; grabbed from package jottacloud
	hashStr, err := src.Hash(ctx, hash.SHA1)
	if err != nil || hashStr == "" {
		// unwrap the accounting from the input, we use wrap to put it
		// back on after the buffering
		var wrap accounting.WrapFn
		in, wrap = accounting.UnWrap(in)
		var cleanup func()
		hashStr, in, cleanup, err = readSHA1(in, size, int64(o.fs.opt.HashMemoryThreshold))
		defer cleanup()
		if err != nil {
			return fmt.Errorf("failed to calculate SHA1: %w", err)
	// Calculate gcid; grabbed from package jottacloud
	gcid, err := o.fs.getGcid(ctx, src)
	if err != nil || gcid == "" {
		fs.Debugf(o, "calculating gcid: %v", err)
		if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
			// No buffering; directly calculate gcid from source
			rc, err := srcObj.Open(ctx)
			if err != nil {
				return fmt.Errorf("failed to open src: %w", err)
			}
			defer fs.CheckClose(rc, &err)

			if gcid, err = calcGcid(rc, srcObj.Size()); err != nil {
				return fmt.Errorf("failed to calculate gcid: %w", err)
			}
		} else {
			var cleanup func()
			gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold))
			defer cleanup()
			if err != nil {
				return fmt.Errorf("failed to calculate gcid: %w", err)
			}
		}
		// Wrap the accounting back onto the stream
		in = wrap(in)
	}
	fs.Debugf(o, "gcid = %s", gcid)

	if !withTemp {
		info, err := o.fs.upload(ctx, in, leaf, dirID, hashStr, size, options...)
		info, err := o.fs.upload(ctx, in, leaf, dirID, gcid, size, options...)
		if err != nil {
			return err
		}

@@ -1678,7 +1806,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi

	// We have to fall back to upload + rename
	tempName := "rcloneTemp" + random.String(8)
	info, err := o.fs.upload(ctx, in, tempName, dirID, hashStr, size, options...)
	info, err := o.fs.upload(ctx, in, tempName, dirID, gcid, size, options...)
	if err != nil {
		return err
	}

backend/pixeldrain/api_client.go (new file, 397 lines)
@@ -0,0 +1,397 @@
package pixeldrain

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
	Path      []FilesystemNode `json:"path"`
	BaseIndex int              `json:"base_index"`
	Children  []FilesystemNode `json:"children"`
}

// Base returns the base node of the path; this is the node that the path
// points to
func (fsp *FilesystemPath) Base() FilesystemNode {
	return fsp.Path[fsp.BaseIndex]
}
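For intuition, a worked example of how BaseIndex picks out the node the path points to, using the types defined in this file (the values are illustrative, not real API output):

// Stat of "/home/file.txt" could yield a path chain plus an index into it:
fsp := FilesystemPath{
	Path: []FilesystemNode{
		{Type: "dir", Path: "/", Name: ""},
		{Type: "dir", Path: "/home", Name: "home"},
		{Type: "file", Path: "/home/file.txt", Name: "file.txt"},
	},
	BaseIndex: 2,
}
base := fsp.Base() // returns the "file.txt" node
_ = base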

// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as response from update
// commands, if requested
type FilesystemNode struct {
	Type      string    `json:"type"`
	Path      string    `json:"path"`
	Name      string    `json:"name"`
	Created   time.Time `json:"created"`
	Modified  time.Time `json:"modified"`
	ModeOctal string    `json:"mode_octal"`

	// File params
	FileSize  int64  `json:"file_size"`
	FileType  string `json:"file_type"`
	SHA256Sum string `json:"sha256_sum"`

	// ID is only filled in when the file/directory is publicly shared
	ID string `json:"id,omitempty"`
}

// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry

// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory, and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
	Time    time.Time `json:"time"`
	Path    string    `json:"path"`
	PathNew string    `json:"path_new"`
	Action  string    `json:"action"`
	Type    string    `json:"type"`
}
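A sketch of decoding a single entry via the struct tags above; the JSON values are made up for illustration:

raw := []byte(`{"time":"2024-05-01T12:00:00Z","path":"/docs/a.txt","path_new":"/docs/b.txt","action":"move","type":"file"}`)
var entry ChangeLogEntry
if err := json.Unmarshal(raw, &entry); err != nil {
	// handle a malformed change log entry
}
// entry.Action == "move" and entry.PathNew holds the post-move path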

// UserInfo contains information about the logged in user
type UserInfo struct {
	Username         string           `json:"username"`
	Subscription     SubscriptionType `json:"subscription"`
	StorageSpaceUsed int64            `json:"storage_space_used"`
}

// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription, like the
// perks and cost
type SubscriptionType struct {
	Name         string `json:"name"`
	StorageSpace int64  `json:"storage_space"`
}

// APIError is the error type returned by the pixeldrain API
type APIError struct {
	StatusCode string `json:"value"`
	Message    string `json:"message"`
}

func (e APIError) Error() string { return e.StatusCode }

// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
	errNotFound             = errors.New("pd api: path not found")
	errExists               = errors.New("pd api: node already exists")
	errAuthenticationFailed = errors.New("pd api: authentication failed")
)

func apiErrorHandler(resp *http.Response) (err error) {
	var e APIError
	if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
		return fmt.Errorf("failed to parse error json: %w", err)
	}

	// We close the body here so that the API handlers can be sure that the
	// response body is not still open when an error was returned
	if err = resp.Body.Close(); err != nil {
		return fmt.Errorf("failed to close resp body: %w", err)
	}

	if e.StatusCode == "path_not_found" {
		return errNotFound
	} else if e.StatusCode == "directory_not_empty" {
		return fs.ErrorDirectoryNotEmpty
	} else if e.StatusCode == "node_already_exists" {
		return errExists
	} else if e.StatusCode == "authentication_failed" {
		return errAuthenticationFailed
	} else if e.StatusCode == "permission_denied" {
		return fs.ErrorPermissionDenied
	}

	return e
}

var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
}

// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
	params = make(url.Values)

	if modified, ok := meta["mtime"]; ok {
		params.Set("modified", modified)
	}
	if created, ok := meta["btime"]; ok {
		params.Set("created", created)
	}
	if mode, ok := meta["mode"]; ok {
		params.Set("mode", mode)
	}
	if shared, ok := meta["shared"]; ok {
		params.Set("shared", shared)
	}
	if loggingEnabled, ok := meta["logging_enabled"]; ok {
		params.Set("logging_enabled", loggingEnabled)
	}

	return params
}
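A quick illustration of the mapping performed above (the keys are the ones the function checks; the values are made up):

meta := fs.Metadata{"mtime": "2024-05-01T12:00:00Z", "mode": "755"}
params := paramsFromMetadata(meta)
// params.Encode() == "mode=755&modified=2024-05-01T12%3A00%3A00Z"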

// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
	// Trim the path prefix. The path prefix is hidden from rclone during all
	// operations. Saving it here would confuse rclone a lot. So instead we
	// strip it here and add it back for every API request we need to perform
	node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
	return &Object{fs: f, base: node}
}

func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
	return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}

func (f *Fs) escapePath(p string) (out string) {
	// Add the path prefix, encode all the parts and combine them together
	var parts = strings.Split(f.pathPrefix+p, "/")
	for i := range parts {
		parts[i] = url.PathEscape(parts[i])
	}
	return strings.Join(parts, "/")
}
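Tracing escapePath with an assumed pathPrefix of "/me/" (the prefix format is set up in pixeldrain.go further below):

// f.pathPrefix == "/me/", p == "my dir/file#1.txt"
// splitting "/me/my dir/file#1.txt" on "/" and escaping each part yields
// "/me/my%20dir/file%231.txt"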

func (f *Fs) put(
	ctx context.Context,
	path string,
	body io.Reader,
	meta fs.Metadata,
	options []fs.OpenOption,
) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(meta)

	// Tell the server to automatically create parent directories if they don't
	// exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "PUT",
				Path:       f.escapePath(path),
				Body:       body,
				Parameters: params,
				Options:    options,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &rest.Opts{
			Method:  "GET",
			Path:    f.escapePath(path),
			Options: options,
		})
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
	return fsp, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(path),
				// To receive node info from the pixeldrain API you need to add the
				// ?stat query. Without it pixeldrain will return the file contents
				// if the URL points to a file
				Parameters: url.Values{"stat": []string{""}},
			},
			nil,
			&fsp,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
	return changeLog, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(""),
				Parameters: url.Values{
					"change_log": []string{""},
					"start":      []string{start.Format(time.RFC3339Nano)},
					"end":        []string{end.Format(time.RFC3339Nano)},
				},
			},
			nil,
			&changeLog,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(fields)
	params.Set("action", "update")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(path),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(dir),
				MultipartParams: url.Values{"action": []string{"mkdirall"}},
				NoResponse:      true,
			},
			nil,
			nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")

// rename renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		// This is not a pixeldrain FS, can't move
		return node, errIncompatibleSourceFS
	} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
		// Path is not in the same root dir, can't move
		return node, errIncompatibleSourceFS
	}

	var params = paramsFromMetadata(meta)
	params.Set("action", "rename")

	// The target is always in our own filesystem so here we use our
	// own pathPrefix
	params.Set("target", f.pathPrefix+to)

	// Create parent directories if the parent directory of the file
	// does not exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "POST",
				// Important: We use the source FS path prefix here
				Path:            srcFs.escapePath(from),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
	var params url.Values
	if recursive {
		// Tell the server to recursively delete all child files
		params = url.Values{"recursive": []string{"true"}}
	}

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "DELETE",
				Path:       f.escapePath(path),
				Parameters: params,
				NoResponse: true,
			},
			nil, nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
	return user, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				// The default RootURL points at the filesystem endpoint. We can't
				// use that to request user information. So here we override it to
				// the user endpoint
				RootURL: f.opt.APIURL + "/user",
			},
			nil,
			&user,
		)
		return shouldRetry(ctx, resp, err)
	})
}
backend/pixeldrain/pixeldrain.go (new file, 567 lines)
@@ -0,0 +1,567 @@
// Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	timeFormat    = time.RFC3339Nano
	minSleep      = pacer.MinSleep(10 * time.Millisecond)
	maxSleep      = pacer.MaxSleep(1 * time.Second)
	decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pixeldrain",
		Description: "Pixeldrain Filesystem",
		NewFs:       NewFs,
		Config:      nil,
		Options: []fs.Option{{
			Name: "api_key",
			Help: "API key for your pixeldrain account.\n" +
				"Found on https://pixeldrain.com/user/api_keys.",
			Sensitive: true,
		}, {
			Name: "root_folder_id",
			Help: "Root of the filesystem to use.\n\n" +
				"Set to 'me' to use your personal filesystem. " +
				"Set to a shared directory ID to use a shared directory.",
			Default: "me",
		}, {
			Name: "api_url",
			Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
				"this at default. It is only intended to be changed for testing purposes.",
			Default:  "https://pixeldrain.com/api",
			Advanced: true,
			Required: true,
		}},
		MetadataInfo: &fs.MetadataInfo{
			System: map[string]fs.MetadataHelp{
				"mode": {
					Help:    "File mode",
					Type:    "octal, unix style",
					Example: "755",
				},
				"mtime": {
					Help:    "Time of last modification",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
				"btime": {
					Help:    "Time of file birth (creation)",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
			},
			Help: "Pixeldrain supports file modes and creation times.",
		},
	})
}
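With the options registered above, a configured remote would look roughly like this in rclone.conf (the remote name and key are placeholders):

[pixeldrain]
type = pixeldrain
api_key = <your API key>
root_folder_id = me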

// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`
	RootFolderID string `config:"root_folder_id"`
	APIURL       string `config:"api_url"`
}

// Fs represents a remote pixeldrain filesystem
type Fs struct {
	name     string       // name of this remote, as given to NewFs
	root     string       // the path we are working on, as given to NewFs
	opt      Options      // parsed options
	features *fs.Features // optional features
	srv      *rest.Client // the connection to the server
	pacer    *fs.Pacer
	loggedIn bool // if the user is authenticated

	// pathPrefix is the directory we're working in. The pathPrefix is stripped
	// from every API response containing a path. The pathPrefix always begins
	// and ends with a slash for concatenation convenience
	pathPrefix string
}

// Object describes a pixeldrain file
type Object struct {
	fs   *Fs            // what this object is part of
	base FilesystemNode // the node this object references
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
	}
	f.features = (&fs.Features{
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ReadMetadata:            true,
		WriteMetadata:           true,
	}).Fill(ctx, f)

	// Set the path prefix. This is the path to the root directory on the
	// server. We add it to each request and strip it from each response because
	// rclone does not want to see it
	f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"

	// The root URL equates to https://pixeldrain.com/api/filesystem during
	// normal operation. API handlers need to manually add the pathPrefix to
	// each request
	f.srv.SetRoot(opt.APIURL + "/filesystem")

	// If using an APIKey, set the Authorization header
	if len(opt.APIKey) > 0 {
		f.srv.SetUserPass("", opt.APIKey)

		// Check if credentials are correct
		user, err := f.userInfo(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get user data: %w", err)
		}

		f.loggedIn = true

		fs.Infof(f,
			"Logged in as '%s', subscription '%s', storage limit %d",
			user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
		)
	}

	if !f.loggedIn && opt.RootFolderID == "me" {
		return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
	}

	// Satisfy TestFsIsFile. This test expects that we throw an error if the
	// filesystem root is a file
	fsp, err := f.stat(ctx, "")
	if err != errNotFound && err != nil {
		// It doesn't matter if the root directory does not exist, as long as it
		// is not a file. This is what the test dictates
		return f, err
	} else if err == nil && fsp.Base().Type == "file" {
		// The filesystem root is a file, rclone wants us to set the root to the
		// parent directory
		f.root = path.Dir(f.root)
		f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
		return f, fs.ErrorIsFile
	}

	return f, nil
}
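Tracing the prefix computation in NewFs with illustrative values:

// With root_folder_id = "me" and root = "backup/photos":
// path.Join("me", "backup/photos") == "me/backup/photos"
// so f.pathPrefix == "/me/backup/photos/"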

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fsp, err := f.stat(ctx, dir)
	if err == errNotFound {
		return nil, fs.ErrorDirNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "file" {
		return nil, fs.ErrorIsFile
	}

	entries = make(fs.DirEntries, len(fsp.Children))
	for i := range fsp.Children {
		if fsp.Children[i].Type == "dir" {
			entries[i] = f.nodeToDirectory(fsp.Children[i])
		} else {
			entries[i] = f.nodeToObject(fsp.Children[i])
		}
	}

	return entries, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	fsp, err := f.stat(ctx, remote)
	if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "dir" {
		return nil, fs.ErrorIsDir
	}
	return f.nodeToObject(fsp.Base()), nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to get object metadata: %w", err)
	}

	// Set the mtime from the source if it was not already set in the metadata
	if _, ok := meta["mtime"]; !ok {
		if meta == nil {
			meta = make(fs.Metadata)
		}
		meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
	}

	node, err := f.put(ctx, src.Remote(), in, meta, options)
	if err != nil {
		return nil, fmt.Errorf("failed to put object: %w", err)
	}

	return f.nodeToObject(node), nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	err = f.mkdir(ctx, dir)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		// Spec says we do not return an error if the directory already exists
		return nil
	}
	return err
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, false)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, true)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		// This is not a pixeldrain object. Can't move
		return nil, fs.ErrorCantMove
	}

	node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
	if err == errIncompatibleSourceFS {
		return nil, fs.ErrorCantMove
	} else if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	}

	return f.nodeToObject(node), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
	if err == errIncompatibleSourceFS {
		return fs.ErrorCantDirMove
	} else if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		return fs.ErrorDirExists
	}
	return err
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	// If the root folder ID is not /me we need to explicitly enable change
	// logging for this directory or file
	if f.pathPrefix != "/me/" {
		_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
		if err != nil {
			fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
		}
	}

	go f.changeNotify(ctx, notify, newInterval)
}

func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	var ticker = time.NewTicker(<-newInterval)
	var lastPoll = time.Now()

	for {
		select {
		case dur, ok := <-newInterval:
			if !ok {
				ticker.Stop()
				return
			}

			fs.Debugf(f, "Polling changes at an interval of %s", dur)
			ticker.Reset(dur)

		case t := <-ticker.C:
			clog, err := f.changeLog(ctx, lastPoll, t)
			if err != nil {
				fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
				continue
			}

			for i := range clog {
				fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
					clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)

				if clog[i].Type == "dir" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
				} else if clog[i].Type == "file" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
				}
			}

			lastPoll = t
		}
	}
}
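A minimal sketch of how a caller drives the newInterval channel consumed above (rclone's mount/rc layers normally do this; the snippet is illustrative):

interval := make(chan time.Duration)
f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
	fmt.Println("changed:", path)
}, interval)
interval <- 10 * time.Second // start polling every 10 seconds
// ... later:
close(interval) // stops the poll loop and releases the ticker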

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put already supports streaming so we just use that
	return f.Put(ctx, in, src, options...)
}

// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
	_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	return err
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
	if err != nil {
		return "", err
	}
	if fsn.ID != "" {
		return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
	}
	return "", nil
}
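Tracing the link construction above with the default api_url (the node ID is a placeholder):

// f.opt.APIURL == "https://pixeldrain.com/api" and fsn.ID == "abc123":
// strings.Replace swaps "/api" for "/d/" once, giving "https://pixeldrain.com/d/"
// so the returned link is "https://pixeldrain.com/d/abc123"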

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	user, err := f.userInfo(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}

	usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}

	if user.Subscription.StorageSpace > -1 {
		usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
	}

	return usage, nil
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	if err == nil {
		o.base.Modified = modTime
	}
	return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	return o.fs.read(ctx, o.base.Path, options)
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// Copy the parameters and update the object
	o.base.Modified = src.ModTime(ctx)
	o.base.FileSize = src.Size()
	o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
	_, err = o.fs.Put(ctx, in, o, options...)
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.delete(ctx, o.base.Path, false)
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	return o.base.SHA256Sum, nil
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.base.Path
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.base.Path
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.base.Modified
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.base.FileSize
}

// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
	return o.base.FileType
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return fs.Metadata{
		"mode":  o.base.ModeOctal,
		"mtime": o.base.Modified.Format(timeFormat),
		"btime": o.base.Created.Format(timeFormat),
	}, nil
}

// Verify that all the interfaces are implemented correctly
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Info           = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.PutStreamer    = (*Fs)(nil)
	_ fs.DirSetModTimer = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
	_ fs.DirEntry       = (*Object)(nil)
	_ fs.MimeTyper      = (*Object)(nil)
	_ fs.Metadataer     = (*Object)(nil)
)
backend/pixeldrain/pixeldrain_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Test pixeldrain filesystem interface
package pixeldrain_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pixeldrain"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestPixeldrain:",
		NilObject:       (*pixeldrain.Object)(nil),
		SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
	})
}
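For reference, rclone backend integration tests like this are usually run from the backend's directory against a configured remote, e.g. `go test -v -remote TestPixeldrain:`; the `-remote` flag follows rclone's usual fstest convention, so treat the exact invocation as an assumption here.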

@@ -285,6 +285,9 @@ func getConfigMap(m configmap.Mapper) (uid, accessToken, refreshToken, saltedKey
	}
	_saltedKeyPass = saltedKeyPass

	// empty strings are considered "ok" by m.Get, which is not true business-wise
	ok = accessToken != "" && uid != "" && refreshToken != "" && saltedKeyPass != ""

	return
}

@@ -349,7 +349,7 @@ func (f *Fs) Root() string {

// String converts this Fs to a string
func (f *Fs) String() string {
	return f.description
	return f.description + " at " + f.root
}

// Precision return the precision of this Fs

@@ -13,7 +13,8 @@ import (
	"reflect"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// flags

@@ -82,15 +83,18 @@ func main() {

package s3

import "github.com/aws/aws-sdk-go/service/s3"
import (
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
`)

	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
	genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
	genSetFrom(new(types.Object), new(types.ObjectVersion))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))

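The generated setFrom_* helpers (see the diffs further below) copy identically named, assignable fields between SDK structs. A rough sketch of the field-matching rule a generator like this relies on (illustrative only, not the actual gen_setfrom.go logic):

// copyMatchingFields copies src's fields into dst wherever dst has a field
// with the same name and an assignable type. dst and src must be pointers
// to structs. Requires the "reflect" package.
func copyMatchingFields(dst, src interface{}) {
	d := reflect.ValueOf(dst).Elem()
	s := reflect.ValueOf(src).Elem()
	for i := 0; i < s.NumField(); i++ {
		name := s.Type().Field(i).Name
		if f := d.FieldByName(name); f.IsValid() && f.CanSet() &&
			s.Field(i).Type().AssignableTo(f.Type()) {
			f.Set(s.Field(i))
		}
	}
}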
backend/s3/s3.go (1121 changed lines; diff suppressed because it is too large)
@@ -5,15 +5,17 @@ import (
	"compress/gzip"
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"path"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/hash"

@@ -58,7 +60,17 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
		// "tier" - read only
		// "btime" - read only
	}
	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
	// Cloudflare insists on decompressing `Content-Encoding: gzip` unless
	// `Cache-Control: no-transform` is supplied. This is a deviation from
	// AWS but we fudge the tests here rather than breaking people's
	// expectations of what Cloudflare does.
	//
	// This can always be overridden by using
	// `--header-upload "Cache-Control: no-transform"`
	if f.opt.Provider == "Cloudflare" {
		metadata["cache-control"] = "no-transform"
	}
	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata)
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
	}()

@@ -131,20 +143,20 @@ func TestVersionLess(t *testing.T) {
	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
	for n, test := range []struct {
		a, b *s3.ObjectVersion
		a, b *types.ObjectVersion
		want bool
	}{
		{a: nil, b: nil, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
		{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
		{a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
	} {
		got := versionLess(test.a, test.b)
		assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))

@@ -157,24 +169,24 @@ func TestMergeDeleteMarkers(t *testing.T) {
	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
	for n, test := range []struct {
		versions []*s3.ObjectVersion
		markers  []*s3.DeleteMarkerEntry
		want     []*s3.ObjectVersion
		versions []types.ObjectVersion
		markers  []types.DeleteMarkerEntry
		want     []types.ObjectVersion
	}{
		{
			versions: []*s3.ObjectVersion{},
			markers:  []*s3.DeleteMarkerEntry{},
			want:     []*s3.ObjectVersion{},
			versions: []types.ObjectVersion{},
			markers:  []types.DeleteMarkerEntry{},
			want:     []types.ObjectVersion{},
		},
		{
			versions: []*s3.ObjectVersion{
			versions: []types.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			markers: []*s3.DeleteMarkerEntry{},
			want: []*s3.ObjectVersion{
			markers: []types.DeleteMarkerEntry{},
			want: []types.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,

@@ -182,14 +194,14 @@ func TestMergeDeleteMarkers(t *testing.T) {
			},
		},
		{
			versions: []*s3.ObjectVersion{},
			markers: []*s3.DeleteMarkerEntry{
			versions: []types.ObjectVersion{},
			markers: []types.DeleteMarkerEntry{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			want: []*s3.ObjectVersion{
			want: []types.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,

@@ -198,7 +210,7 @@ func TestMergeDeleteMarkers(t *testing.T) {
			},
		},
		{
			versions: []*s3.ObjectVersion{
			versions: []types.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t2,

@@ -208,13 +220,13 @@ func TestMergeDeleteMarkers(t *testing.T) {
					LastModified: &t2,
				},
			},
			markers: []*s3.DeleteMarkerEntry{
			markers: []types.DeleteMarkerEntry{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			want: []*s3.ObjectVersion{
			want: []types.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t2,

@@ -399,22 +411,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
	// quirk is set correctly
	req := s3.CreateBucketInput{
		Bucket: &f.rootBucket,
		ACL:    stringPointerOrNil(f.opt.BucketACL),
		ACL:    types.BucketCannedACL(f.opt.BucketACL),
	}
	if f.opt.LocationConstraint != "" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: &f.opt.LocationConstraint,
		req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint),
		}
	}
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.c.CreateBucketWithContext(ctx, &req)
		_, err := f.c.CreateBucket(ctx, &req)
		return f.shouldRetry(ctx, err)
	})
	var errString string
	var awsError smithy.APIError
	if err == nil {
		errString = "No Error"
	} else if awsErr, ok := err.(awserr.Error); ok {
		errString = awsErr.Code()
	} else if errors.As(err, &awsError) {
		errString = awsError.ErrorCode()
	} else {
		assert.Fail(t, "Unknown error %T %v", err, err)
	}

@@ -4,12 +4,14 @@ package s3

import (
	"context"
	"net/http"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {

@@ -54,20 +56,16 @@ func TestAWSDualStackOption(t *testing.T) {
		// test enabled
		ctx, opt, client := SetupS3Test(t)
		opt.UseDualStack = true
		s3Conn, _, _ := s3Connection(ctx, opt, client)
		if !strings.Contains(s3Conn.Endpoint, "dualstack") {
			t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
			t.Fail()
		}
		s3Conn, err := s3Connection(ctx, opt, client)
		require.NoError(t, err)
		assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
	}
	{
		// test default case
		ctx, opt, client := SetupS3Test(t)
		s3Conn, _, _ := s3Connection(ctx, opt, client)
		if strings.Contains(s3Conn.Endpoint, "dualstack") {
			t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
			t.Fail()
		}
		s3Conn, err := s3Connection(ctx, opt, client)
		require.NoError(t, err)
		assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
	}
}

@ -2,7 +2,10 @@
|
|||
|
||||
package s3
|
||||
|
||||
import "github.com/aws/aws-sdk-go/service/s3"
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
)
|
||||
|
||||
// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {
|
||||
|
@ -27,6 +30,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
|
|||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
|
@ -41,8 +45,8 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
|
|||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
|
||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
|
||||
// setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) {
|
||||
a.IsLatest = b.IsLatest
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
|
@ -60,10 +64,11 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
|
|||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
}
|
||||
|
||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||
// setFrom_typesObject_typesObjectVersion copies matching elements from a to b
|
||||
func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ETag = b.ETag
|
||||
a.Key = b.Key
|
||||
|
@ -71,7 +76,6 @@ func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
|||
a.Owner = b.Owner
|
||||
a.RestoreStatus = b.RestoreStatus
|
||||
a.Size = b.Size
|
||||
a.StorageClass = b.StorageClass
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b
|
||||
|
@ -82,6 +86,7 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa
|
|||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.Expires = b.Expires
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
|
@@ -96,8 +101,9 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipa
 
 // setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
 func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm
@@ -111,7 +117,6 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode
@@ -132,6 +137,7 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
 func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
 	a.Bucket = b.Bucket
 	a.CopySource = b.CopySource
+	a.Key = b.Key
 	a.CopySourceIfMatch = b.CopySourceIfMatch
 	a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
 	a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch
@@ -141,7 +147,6 @@ func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput,
 	a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
 	a.ExpectedBucketOwner = b.ExpectedBucketOwner
 	a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
-	a.Key = b.Key
 	a.RequestPayer = b.RequestPayer
 	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
 	a.SSECustomerKey = b.SSECustomerKey
@@ -166,6 +171,7 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
 	a.ETag = b.ETag
 	a.Expiration = b.Expiration
 	a.Expires = b.Expires
+	a.ExpiresString = b.ExpiresString
 	a.LastModified = b.LastModified
 	a.Metadata = b.Metadata
 	a.MissingMeta = b.MissingMeta
@@ -183,12 +189,14 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
 	a.StorageClass = b.StorageClass
 	a.VersionId = b.VersionId
 	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
+	a.ResultMetadata = b.ResultMetadata
 }
 
 // setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
 func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm
@@ -202,7 +210,6 @@ func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipart
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode
@@ -232,6 +239,7 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
 	a.ContentLanguage = b.ContentLanguage
 	a.ContentLength = b.ContentLength
 	a.ContentType = b.ContentType
+	a.Expires = b.Expires
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode
@@ -246,8 +254,9 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
 
 // setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
 func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm
@@ -261,7 +270,6 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode
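The setFrom_* helpers above (backend/s3/setfrom.go in the rclone tree) are generated code: each copies every field whose name and type match between two SDK struct types. That matching rule plausibly explains the churn in this diff — fields such as ResultMetadata appear once both SDK v2 structs have them, while a.StorageClass = b.StorageClass appears to drop out where v2 gives the two structs different enum types. A reflect-based sketch of the idea, not rclone's actual generator (which also hoists required fields such as Bucket and Key to the top and writes the result to a file):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// genSetFrom prints a setFrom-style copier for every exported field of
// src's type whose name and exact type also occur in dst's type.
func genSetFrom(dst, src interface{}) {
	dt, st := reflect.TypeOf(dst), reflect.TypeOf(src)
	fmt.Printf("func setFrom_s3%s_s3%s(a *s3.%s, b *s3.%s) {\n",
		dt.Name(), st.Name(), dt.Name(), st.Name())
	for i := 0; i < st.NumField(); i++ {
		f := st.Field(i)
		if !f.IsExported() {
			continue
		}
		// Only emit an assignment when name and type match exactly.
		if g, ok := dt.FieldByName(f.Name); ok && g.Type == f.Type {
			fmt.Printf("\ta.%s = b.%s\n", f.Name, f.Name)
		}
	}
	fmt.Println("}")
}

func main() {
	genSetFrom(s3.CreateMultipartUploadInput{}, s3.CopyObjectInput{})
}
```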
@@ -3,6 +3,7 @@
 package s3
 
 import (
+	"context"
 	"crypto/hmac"
 	"crypto/sha1"
 	"encoding/base64"
@@ -10,6 +11,9 @@ import (
 	"sort"
 	"strings"
 	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 )
 
 // URL parameters that need to be added to the signature
@@ -36,10 +40,17 @@ var s3ParamsToSign = map[string]struct{}{
 	"response-content-encoding": {},
 }
 
-// sign signs requests using v2 auth
+// Implement HTTPSignerV4 interface
+type v2Signer struct {
+	opt *Options
+}
+
+// SignHTTP signs requests using v2 auth.
 //
-// Cobbled together from goamz and aws-sdk-go
-func sign(AccessKey, SecretKey string, req *http.Request) {
+// Cobbled together from goamz and aws-sdk-go.
+//
+// Bodged up to compile with AWS SDK v2
+func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
 	// Set date
 	date := time.Now().UTC().Format(time.RFC1123)
 	req.Header.Set("Date", date)
@@ -107,11 +118,12 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
 
 	// Make signature
 	payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
-	hash := hmac.New(sha1.New, []byte(SecretKey))
+	hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey))
 	_, _ = hash.Write([]byte(payload))
 	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
 	base64.StdEncoding.Encode(signature, hash.Sum(nil))
 
 	// Set signature in request
-	req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
+	req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature))
+	return nil
 }
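This hunk turns the free-standing sign helper into a method on v2Signer so it satisfies the SDK v2 HTTPSignerV4 interface (hence the added return nil), with credentials now read from the backend Options rather than passed as arguments. A hedged sketch of how such a signer is typically wired into an SDK v2 client; newS3Client, the config loading, and the assumed imports ("context", "github.com/aws/aws-sdk-go-v2/config", "github.com/aws/aws-sdk-go-v2/service/s3") are illustrative, not code from this diff:

```go
// Hypothetical wiring sketch, in the same package as v2Signer.
func newS3Client(ctx context.Context, opt *Options) (*s3.Client, error) {
	awsCfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		// Swap the default SigV4 signer for the legacy v2 signer;
		// any type with a matching SignHTTP method satisfies
		// s3.Options.HTTPSignerV4.
		o.HTTPSignerV4 = &v2Signer{opt: opt}
	}), nil
}
```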
@@ -62,7 +62,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
 		// This is only going to be http errors here
 		return "", fmt.Errorf("failed to authenticate: %w", err)
 	}
-	if result.Errors != nil && len(result.Errors) > 0 {
+	if len(result.Errors) > 0 {
 		return "", errors.New(strings.Join(result.Errors, ", "))
 	}
 	if result.Token == "" {
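The simplification in this hunk is safe because len of a nil slice is defined as 0 in Go, so the nil guard was redundant and len(result.Errors) > 0 alone behaves identically — the kind of cleanup linters such as staticcheck flag (check S1009). A quick self-contained illustration:

```go
package main

import "fmt"

func main() {
	var errs []string // nil slice
	fmt.Println(errs == nil) // true
	fmt.Println(len(errs))   // 0
	// This guard alone is equivalent to the old nil-and-length check.
	if len(errs) > 0 {
		fmt.Println("errors:", errs)
	}
}
```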
Some files were not shown because too many files have changed in this diff.