Compare commits
1 commit: tcl/master...fix-http-s

Author | SHA1 | Date
---|---|---
 | 444a6e6d2d | 

1549 changed files with 77889 additions and 270530 deletions
@@ -1,45 +0,0 @@ (deleted file)
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory -->
<!--- If no reason/fix/additions for the bug can be suggested, -->
<!--- uncomment the following phrase: -->

<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->

1.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):
@@ -1 +0,0 @@ (deleted file)
blank_issues_enabled: false
@@ -1,24 +0,0 @@ (deleted file)
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make
@@ -1,20 +0,0 @@ (deleted file)
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
@@ -1,67 +0,0 @@ (deleted file)
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true

      - name: Install linters
        run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest

      - name: Run linters
        run: make check
  test:
    name: Test
    runs-on: oci-runner
    strategy:
      matrix:
        go_versions: [ '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Tests for the FrostFS backend
        env:
          RESTIC_TEST_FUSE: false
          AIO_IMAGE: truecloudlab/frostfs-aio
          AIO_VERSION: 1.7.0-nightly.4
          RCLONE_CONFIG: /config/rclone.conf

        # run only tests related to FrostFS backend
        run: |-
          podman-service.sh
          podman info

          mkdir /config
          printf "[TestFrostFS]\ntype = frostfs\nendpoint = localhost:8080\nwallet = /config/wallet.json\nplacement_policy = REP 1\nrequest_timeout = 20s\nconnection_timeout = 21s" > /config/rclone.conf

          echo "Run frostfs aio container"
          docker run -d --net=host --name aio $AIO_IMAGE:$AIO_VERSION --restart always -p 8080:8080

          echo "Wait for frostfs to start"
          until docker exec aio curl --fail http://localhost:8083 > /dev/null 2>&1; do sleep 0.2; done;

          echo "Issue creds"
          docker exec aio /usr/bin/issue-creds.sh native
          echo "Copy wallet"
          docker cp aio:/config/user-wallet.json /config/wallet.json

          echo "Start tests"
          go test -v github.com/rclone/rclone/backend/frostfs
.gitattributes (4 changes, vendored)

@@ -1,7 +1,3 @@
- # Go writes go.mod and go.sum with lf even on windows
- go.mod text eol=lf
- go.sum text eol=lf
-
  # Ignore generated files in GitHub language statistics and diffs
  /MANUAL.* linguist-generated=true
  /rclone.1 linguist-generated=true
.github/workflows/build.yml (128 changes, vendored)

@@ -27,12 +27,12 @@ jobs:
  strategy:
  fail-fast: false
  matrix:
- job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
+ job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']

  include:
  - job_name: linux
  os: ubuntu-latest
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  gotags: cmount
  build_flags: '-include "^linux/"'
  check: true
@@ -43,14 +43,14 @@

  - job_name: linux_386
  os: ubuntu-latest
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  goarch: 386
  gotags: cmount
  quicktest: true

  - job_name: mac_amd64
- os: macos-latest
+ os: macos-11
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  gotags: 'cmount'
  build_flags: '-include "^darwin/amd64" -cgo'
  quicktest: true
@@ -58,15 +58,15 @@
  deploy: true

  - job_name: mac_arm64
- os: macos-latest
+ os: macos-11
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  gotags: 'cmount'
  build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
  deploy: true

  - job_name: windows
  os: windows-latest
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  gotags: cmount
  cgo: '0'
  build_flags: '-include "^windows/"'
@@ -76,20 +76,20 @@

  - job_name: other_os
  os: ubuntu-latest
- go: '>=1.23.0-rc.1'
+ go: '1.21.0-rc.3'
  build_flags: '-exclude "^(windows/|darwin/|linux/)"'
  compile_all: true
  deploy: true

- - job_name: go1.21
+ - job_name: go1.19
  os: ubuntu-latest
- go: '1.21'
+ go: '1.19'
  quicktest: true
  racequicktest: true

- - job_name: go1.22
+ - job_name: go1.20
  os: ubuntu-latest
- go: '1.22'
+ go: '1.20'
  quicktest: true
  racequicktest: true

@@ -99,12 +99,12 @@

  steps:
  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0

  - name: Install Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v4
  with:
  go-version: ${{ matrix.go }}
  check-latest: true
@@ -124,7 +124,7 @@
  sudo modprobe fuse
  sudo chmod 666 /dev/fuse
  sudo chown root:$USER /etc/fuse.conf
- sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+ sudo apt-get install fuse3 libfuse-dev rpm pkg-config
  if: matrix.os == 'ubuntu-latest'

  - name: Install Libraries on macOS
@@ -137,8 +137,7 @@
  brew untap --force homebrew/cask
  brew update
  brew install --cask macfuse
- brew install git-annex git-annex-remote-rclone
- if: matrix.os == 'macos-latest'
+ if: matrix.os == 'macos-11'

  - name: Install Libraries on Windows
  shell: powershell
@@ -168,6 +167,14 @@
  printf "\n\nSystem environment:\n\n"
  env

+ - name: Go module cache
+ uses: actions/cache@v3
+ with:
+ path: ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
  - name: Build rclone
  shell: bash
  run: |
@@ -209,6 +216,7 @@
  shell: bash
  run: |
  if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+ if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
  make ci_beta
  env:
  RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -223,71 +231,21 @@
  runs-on: ubuntu-latest

  steps:
- - name: Get runner parameters
- id: get-runner-parameters
- shell: bash
- run: |
- echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
- echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
-
  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3

+ - name: Code quality test
+ uses: golangci/golangci-lint-action@v3
+ with:
+ # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+ version: latest
+
+ # Run govulncheck on the latest go version, the one we build binaries with
  - name: Install Go
- id: setup-go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v4
  with:
- go-version: '>=1.23.0-rc.1'
+ go-version: '1.21.0-rc.3'
  check-latest: true
- cache: false

- - name: Cache
- uses: actions/cache@v4
- with:
- path: |
- ~/go/pkg/mod
- ~/.cache/go-build
- ~/.cache/golangci-lint
- key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
- restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-
- - name: Code quality test (Linux)
- uses: golangci/golangci-lint-action@v6
- with:
- version: latest
- skip-cache: true
-
- - name: Code quality test (Windows)
- uses: golangci/golangci-lint-action@v6
- env:
- GOOS: "windows"
- with:
- version: latest
- skip-cache: true
-
- - name: Code quality test (macOS)
- uses: golangci/golangci-lint-action@v6
- env:
- GOOS: "darwin"
- with:
- version: latest
- skip-cache: true
-
- - name: Code quality test (FreeBSD)
- uses: golangci/golangci-lint-action@v6
- env:
- GOOS: "freebsd"
- with:
- version: latest
- skip-cache: true
-
- - name: Code quality test (OpenBSD)
- uses: golangci/golangci-lint-action@v6
- env:
- GOOS: "openbsd"
- with:
- version: latest
- skip-cache: true
-
  - name: Install govulncheck
  run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -303,15 +261,23 @@

  steps:
  - name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0

  # Upgrade together with NDK version
  - name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v4
  with:
- go-version: '>=1.23.0-rc.1'
+ go-version: '1.21.0-rc.3'

+ - name: Go module cache
+ uses: actions/cache@v3
+ with:
+ path: ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
  - name: Set global environment variables
  shell: bash
@@ -10,35 +10,26 @@ jobs:
  runs-on: ubuntu-latest
  name: Build image job
  steps:
- - name: Free some space
- shell: bash
- run: |
- df -h .
- # Remove android SDK
- sudo rm -rf /usr/local/lib/android || true
- # Remove .net runtime
- sudo rm -rf /usr/share/dotnet || true
- df -h .
  - name: Checkout master
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  - name: Login to Docker Hub
- uses: docker/login-action@v3
+ uses: docker/login-action@v2
  with:
  username: ${{ secrets.DOCKERHUB_USERNAME }}
  password: ${{ secrets.DOCKERHUB_TOKEN }}
  - name: Extract metadata (tags, labels) for Docker
  id: meta
- uses: docker/metadata-action@v5
+ uses: docker/metadata-action@v4
  with:
  images: ghcr.io/${{ github.repository }}
  - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
+ uses: docker/setup-qemu-action@v2
  - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v2
  - name: Login to GitHub Container Registry
- uses: docker/login-action@v3
+ uses: docker/login-action@v2
  with:
  registry: ghcr.io
  # This is the user that triggered the Workflow. In this case, it will
@@ -51,12 +42,9 @@
  # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
  # for more detailed information.
  password: ${{ secrets.GITHUB_TOKEN }}
- - name: Show disk usage
- shell: bash
- run: |
- df -h .
  - name: Build and publish image
- uses: docker/build-push-action@v6
+ uses: docker/build-push-action@v4
  with:
  file: Dockerfile
  context: .
@@ -66,12 +54,8 @@
  rclone/rclone:beta
  labels: ${{ steps.meta.outputs.labels }}
  platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
- cache-from: type=gha, scope=${{ github.workflow }}
+ cache-from: type=gha
- cache-to: type=gha, mode=max, scope=${{ github.workflow }}
+ cache-to: type=gha,mode=max
  provenance: false
  # Eventually cache will need to be cleared if builds more frequent than once a week
  # https://github.com/docker/build-push-action/issues/252
- - name: Show disk usage
- shell: bash
- run: |
- df -h .
@@ -10,17 +10,8 @@
  runs-on: ubuntu-latest
  name: Build image job
  steps:
- - name: Free some space
- shell: bash
- run: |
- df -h .
- # Remove android SDK
- sudo rm -rf /usr/local/lib/android || true
- # Remove .net runtime
- sudo rm -rf /usr/share/dotnet || true
- df -h .
  - name: Checkout master
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  - name: Get actual patch version
@@ -32,27 +23,15 @@
  - name: Get actual major version
  id: actual_major_version
  run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
- - name: Login to Docker Hub
- uses: docker/login-action@v3
- with:
- username: ${{ secrets.DOCKER_HUB_USER }}
- password: ${{ secrets.DOCKER_HUB_PASSWORD }}
  - name: Build and publish image
- uses: docker/build-push-action@v6
+ uses: ilteoood/docker_buildx@1.1.0
  with:
- file: Dockerfile
+ tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
- context: .
+ imageName: rclone/rclone
- platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+ platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
- push: true
+ publish: true
- tags: |
+ dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
- rclone/rclone:latest
+ dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
- rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
- rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
- rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
  if: github.repository == 'rclone/rclone'
@@ -60,17 +39,8 @@
  runs-on: ubuntu-latest
  name: Build docker plugin job
  steps:
- - name: Free some space
- shell: bash
- run: |
- df -h .
- # Remove android SDK
- sudo rm -rf /usr/local/lib/android || true
- # Remove .net runtime
- sudo rm -rf /usr/share/dotnet || true
- df -h .
  - name: Checkout master
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  - name: Build and publish docker plugin
.github/workflows/notify.yml (15 changes, vendored)

@@ -1,15 +0,0 @@ (deleted file)
name: Notify users based on issue labels

on:
  issues:
    types: [labeled]

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - uses: jenschelkopf/issue-label-notification-action@1.3
        with:
          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
          recipients: |
            Support Contract=@rclone/support
.github/workflows/winget.yml (28 changes, vendored)

@@ -1,14 +1,14 @@
  name: Publish to Winget
  on:
  release:
  types: [released]

  jobs:
  publish:
- runs-on: ubuntu-latest
+ runs-on: windows-latest # Action can only run on Windows
  steps:
  - uses: vedantmgoyal2009/winget-releaser@v2
  with:
  identifier: Rclone.Rclone
  installers-regex: '-windows-\w+\.zip$'
  token: ${{ secrets.WINGET_TOKEN }}
.gitignore (9 changes, vendored)

@@ -3,20 +3,15 @@ _junk/
  rclone
  rclone.exe
  build
- /docs/public/
+ docs/public
- /docs/.hugo_build.lock
- /docs/static/img/logos/
  rclone.iml
  .idea
  .history
- .vscode
  *.test
+ *.log
  *.iml
  fuzz-build.zip
  *.orig
  *.rej
  Thumbs.db
  __pycache__
- .DS_Store
- resource_windows_*.syso
- .devcontainer
.golangci.yml (100 changes)

@@ -13,7 +13,6 @@ linters:
  - stylecheck
  - unused
  - misspell
- - gocritic
  #- prealloc
  #- maligned
  disable-all: true
@@ -34,111 +33,24 @@ issues:
  - staticcheck
  text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

- # don't disable the revive messages about comments on exported functions
- include:
- - EXC0012
- - EXC0013
- - EXC0014
- - EXC0015
-
  run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 10m

  linters-settings:
  revive:
- # setting rules seems to disable all the rules, so re-enable them here
  rules:
- - name: blank-imports
- disabled: false
- - name: context-as-argument
- disabled: false
- - name: context-keys-type
- disabled: false
- - name: dot-imports
- disabled: false
- - name: empty-block
- disabled: true
- - name: error-naming
- disabled: false
- - name: error-return
- disabled: false
- - name: error-strings
- disabled: false
- - name: errorf
- disabled: false
- - name: exported
- disabled: false
- - name: increment-decrement
- disabled: true
- - name: indent-error-flow
- disabled: false
- - name: package-comments
- disabled: false
- - name: range
- disabled: false
- - name: receiver-naming
- disabled: false
- - name: redefines-builtin-id
- disabled: true
- - name: superfluous-else
- disabled: true
- - name: time-naming
- disabled: false
- - name: unexported-return
- disabled: false
  - name: unreachable-code
  disabled: true
  - name: unused-parameter
  disabled: true
- - name: var-declaration
+ - name: empty-block
- disabled: false
+ disabled: true
- - name: var-naming
+ - name: redefines-builtin-id
- disabled: false
+ disabled: true
+ - name: superfluous-else
+ disabled: true
  stylecheck:
  # Only enable the checks performed by the staticcheck stand-alone tool,
  # as documented here: https://staticcheck.io/docs/configuration/options/#checks
  checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
- gocritic:
- # Enable all default checks with some exceptions and some additions (commented).
- # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
- disable-all: true
- enabled-checks:
- #- appendAssign # Enabled by default
- - argOrder
- - assignOp
- - badCall
- - badCond
- #- captLocal # Enabled by default
- - caseOrder
- - codegenComment
- #- commentFormatting # Enabled by default
- - defaultCaseOrder
- - deprecatedComment
- - dupArg
- - dupBranchBody
- - dupCase
- - dupSubExpr
- - elseif
- #- exitAfterDefer # Enabled by default
- - flagDeref
- - flagName
- #- ifElseChain # Enabled by default
- - mapKey
- - newDeref
- - offBy1
- - regexpMust
- - ruleguard # Not enabled by default
- #- singleCaseSwitch # Enabled by default
- - sloppyLen
- - sloppyTypeAssert
- - switchTrue
- - typeSwitchVar
- - underef
- - unlambda
- - unslice
- - valSwap
- - wrapperFunc
- settings:
- ruleguard:
- rules: "${configDir}/bin/rules.go"
CONTRIBUTING.md (316 changes)

@@ -1,8 +1,8 @@
- # Contributing to rclone
+ # Contributing to rclone #

  This is a short guide on how to contribute things to rclone.

- ## Reporting a bug
+ ## Reporting a bug ##

  If you've just got a question or aren't sure if you've found a bug
  then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
  possible as well as a description of the problem. Make sure you test
  with the [latest beta of rclone](https://beta.rclone.org/):

- - Rclone version (e.g. output from `rclone version`)
+ * Rclone version (e.g. output from `rclone version`)
- - Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
+ * Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- - The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+ * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- - A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
+ * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- - if the log contains secrets then edit the file with a text editor first to obscure them
+ * if the log contains secrets then edit the file with a text editor first to obscure them

- ## Submitting a new feature or bug fix
+ ## Submitting a new feature or bug fix ##

  If you find a bug that you'd like to fix, or a new feature that you'd
  like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the

  Make sure you

- - Add [unit tests](#testing) for a new feature.
+ * Add [unit tests](#testing) for a new feature.
- - Add [documentation](#writing-documentation) for a new feature.
+ * Add [documentation](#writing-documentation) for a new feature.
- - [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
+ * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

  When you are done with that push your changes to GitHub:
@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

  You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

- ## Using Git and GitHub
+ ## Using Git and GitHub ##

- ### Committing your changes
+ ### Committing your changes ###

  Follow the guideline for [commit messages](#commit-messages) and then:
@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:

  If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

- ### Replacing your previously pushed commits
+ ### Replacing your previously pushed commits ###

  Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:

  git push --force origin my-new-feature

- ### Basing your changes on the latest master
+ ### Basing your changes on the latest master ###

  To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
@@ -149,21 +149,13 @@ If you squash commits that have been pushed to GitHub, then you will have to [re

  Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

- ### GitHub Continuous Integration
+ ### GitHub Continuous Integration ###

  rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

- ## Testing
+ ## Testing ##

- ### Code quality tests
+ ### Quick testing ###

- If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
-
- You can run them with `make check` or with `golangci-lint run ./...`.
-
- Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
-
- ### Quick testing
-
  rclone's tests are run from the go testing framework, so at the top
  level you can run this to run all the tests.
@@ -176,7 +168,7 @@ You can also use `make`, if supported by your platform

  The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

- ### Backend testing
+ ### Backend testing ###

  rclone contains a mixture of unit tests and integration tests.
  Because it is difficult (and in some respects pointless) to test cloud
@@ -209,9 +201,9 @@ altogether with an HTML report and test retries then from the
  project root:

  go install github.com/rclone/rclone/fstest/test_all
- test_all -backends drive
+ test_all -backend drive

- ### Full integration testing
+ ### Full integration testing ###

  If you want to run all the integration tests against all the remotes,
  then change into the project root and run
@@ -226,56 +218,55 @@ The commands may require some extra go packages which you can install with
  The full integration tests are run daily on the integration test server. You can
  find the results at https://pub.rclone.org/integration-tests/

- ## Code Organisation
+ ## Code Organisation ##

  Rclone code is organised into a small number of top level directories
  with modules beneath.

- - backend - the rclone backends for interfacing to cloud providers -
- - all - import this to load all the cloud providers
- - ...providers
- - bin - scripts for use while building or maintaining rclone
- - cmd - the rclone commands
- - all - import this to load all the commands
- - ...commands
- - cmdtest - end-to-end tests of commands, flags, environment variables,...
- - docs - the documentation and website
- - content - adjust these docs only - everything else is autogenerated
- - command - these are auto-generated - edit the corresponding .go file
- - fs - main rclone definitions - minimal amount of code
- - accounting - bandwidth limiting and statistics
- - asyncreader - an io.Reader which reads ahead
- - config - manage the config file and flags
- - driveletter - detect if a name is a drive letter
- - filter - implements include/exclude filtering
- - fserrors - rclone specific error handling
- - fshttp - http handling for rclone
- - fspath - path handling for rclone
- - hash - defines rclone's hash types and functions
- - list - list a remote
- - log - logging facilities
- - march - iterates directories in lock step
- - object - in memory Fs objects
- - operations - primitives for sync, e.g. Copy, Move
- - sync - sync directories
- - walk - walk a directory
- - fstest - provides integration test framework
- - fstests - integration tests for the backends
- - mockdir - mocks an fs.Directory
- - mockobject - mocks an fs.Object
- - test_all - Runs integration tests for everything
- - graphics - the images used in the website, etc.
- - lib - libraries used by the backend
- - atexit - register functions to run when rclone exits
- - dircache - directory ID to name caching
- - oauthutil - helpers for using oauth
- - pacer - retries with backoff and paces operations
- - readers - a selection of useful io.Readers
- - rest - a thin abstraction over net/http for REST
- - librclone - in memory interface to rclone's API for embedding rclone
- - vfs - Virtual FileSystem layer for implementing rclone mount and similar
+ * backend - the rclone backends for interfacing to cloud providers -
+ * all - import this to load all the cloud providers
+ * ...providers
+ * bin - scripts for use while building or maintaining rclone
+ * cmd - the rclone commands
+ * all - import this to load all the commands
+ * ...commands
+ * cmdtest - end-to-end tests of commands, flags, environment variables,...
+ * docs - the documentation and website
+ * content - adjust these docs only - everything else is autogenerated
+ * command - these are auto-generated - edit the corresponding .go file
+ * fs - main rclone definitions - minimal amount of code
+ * accounting - bandwidth limiting and statistics
+ * asyncreader - an io.Reader which reads ahead
+ * config - manage the config file and flags
+ * driveletter - detect if a name is a drive letter
+ * filter - implements include/exclude filtering
+ * fserrors - rclone specific error handling
+ * fshttp - http handling for rclone
+ * fspath - path handling for rclone
+ * hash - defines rclone's hash types and functions
+ * list - list a remote
+ * log - logging facilities
+ * march - iterates directories in lock step
+ * object - in memory Fs objects
+ * operations - primitives for sync, e.g. Copy, Move
+ * sync - sync directories
+ * walk - walk a directory
+ * fstest - provides integration test framework
+ * fstests - integration tests for the backends
+ * mockdir - mocks an fs.Directory
+ * mockobject - mocks an fs.Object
+ * test_all - Runs integration tests for everything
+ * graphics - the images used in the website, etc.
+ * lib - libraries used by the backend
+ * atexit - register functions to run when rclone exits
+ * dircache - directory ID to name caching
+ * oauthutil - helpers for using oauth
+ * pacer - retries with backoff and paces operations
+ * readers - a selection of useful io.Readers
+ * rest - a thin abstraction over net/http for REST
+ * vfs - Virtual FileSystem layer for implementing rclone mount and similar

- ## Writing Documentation
+ ## Writing Documentation ##

  If you are adding a new feature then please update the documentation.
@@ -286,22 +277,22 @@ alphabetical order.
  If you add a new backend option/flag, then it should be documented in
  the source file in the `Help:` field.

- - Start with the most important information about the option,
+ * Start with the most important information about the option,
  as a single sentence on a single line.
- - This text will be used for the command-line flag help.
+ * This text will be used for the command-line flag help.
- - It will be combined with other information, such as any default value,
+ * It will be combined with other information, such as any default value,
  and the result will look odd if not written as a single sentence.
- - It should end with a period/full stop character, which will be shown
+ * It should end with a period/full stop character, which will be shown
  in docs but automatically removed when producing the flag help.
- - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
+ * Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- - More details can be added in a new paragraph, after an empty line (`"\n\n"`).
+ * More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- - Like with docs generated from Markdown, a single line break is ignored
+ * Like with docs generated from Markdown, a single line break is ignored
  and two line breaks creates a new paragraph.
- - This text will be shown to the user in `rclone config`
+ * This text will be shown to the user in `rclone config`
  and in the docs (where it will be added by `make backenddocs`,
  normally run some time before next release).
- - To create options of enumeration type use the `Examples:` field.
+ * To create options of enumeration type use the `Examples:` field.
- - Each example value have their own `Help:` field, but they are treated
+ * Each example value have their own `Help:` field, but they are treated
  a bit different than the main option help text. They will be shown
  as an unordered list, therefore a single line break is enough to
  create a new list item. Also, for enumeration texts like name of
|
||||||
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
|
||||||
for small changes in the docs which makes it very easy.
|
for small changes in the docs which makes it very easy.
|
||||||
|
|
||||||
## Making a release
|
## Making a release ##
|
||||||
|
|
||||||
There are separate instructions for making a release in the RELEASE.md
|
There are separate instructions for making a release in the RELEASE.md
|
||||||
file.
|
file.
|
||||||
|
|
||||||
## Commit messages
|
## Commit messages ##
|
||||||
|
|
||||||
Please make the first line of your commit message a summary of the
|
Please make the first line of your commit message a summary of the
|
||||||
change that a user (not a developer) of rclone would like to read, and
|
change that a user (not a developer) of rclone would like to read, and
|
||||||
|
@ -367,7 +358,7 @@ error fixing the hang.
|
||||||
Fixes #1498
|
Fixes #1498
|
||||||
```
|
```
|
||||||
|
|
||||||
## Adding a dependency
|
## Adding a dependency ##
|
||||||
|
|
||||||
rclone uses the [go
|
rclone uses the [go
|
||||||
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
|
||||||
|
@ -379,7 +370,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
|
||||||
instructions below. These will fetch the dependency and add it to
|
instructions below. These will fetch the dependency and add it to
|
||||||
`go.mod` and `go.sum`.
|
`go.mod` and `go.sum`.
|
||||||
|
|
||||||
go get github.com/ncw/new_dependency
|
GO111MODULE=on go get github.com/ncw/new_dependency
|
||||||
|
|
||||||
You can add constraints on that package when doing `go get` (see the
|
You can add constraints on that package when doing `go get` (see the
|
||||||
go docs linked above), but don't unless you really need to.
|
go docs linked above), but don't unless you really need to.
|
||||||
|
@ -387,15 +378,15 @@ go docs linked above), but don't unless you really need to.
|
||||||
Please check in the changes generated by `go mod` including `go.mod`
|
Please check in the changes generated by `go mod` including `go.mod`
|
||||||
and `go.sum` in the same commit as your other changes.
|
and `go.sum` in the same commit as your other changes.
|
||||||
|
|
||||||
## Updating a dependency
|
## Updating a dependency ##
|
||||||
|
|
||||||
If you need to update a dependency then run
|
If you need to update a dependency then run
|
||||||
|
|
||||||
go get golang.org/x/crypto
|
GO111MODULE=on go get -u golang.org/x/crypto
|
||||||
|
|
||||||
Check in a single commit as above.
|
Check in a single commit as above.
|
||||||
|
|
||||||
## Updating all the dependencies
|
## Updating all the dependencies ##
|
||||||
|
|
||||||
In order to update all the dependencies then run `make update`. This
|
In order to update all the dependencies then run `make update`. This
|
||||||
just uses the go modules to update all the modules to their latest
|
just uses the go modules to update all the modules to their latest
|
||||||
|
@ -404,7 +395,7 @@ stable release. Check in the changes in a single commit as above.
|
||||||
This should be done early in the release cycle to pick up new versions
|
This should be done early in the release cycle to pick up new versions
|
||||||
of packages in time for them to get some testing.
|
of packages in time for them to get some testing.
|
||||||
|
|
||||||
## Updating a backend
|
## Updating a backend ##
|
||||||
|
|
||||||
If you update a backend then please run the unit tests and the
|
If you update a backend then please run the unit tests and the
|
||||||
integration tests for that backend.
|
integration tests for that backend.
|
||||||
|
@ -419,133 +410,82 @@ integration tests.
|
||||||
|
|
||||||
The next section goes into more detail about the tests.
|
The next section goes into more detail about the tests.
|
||||||
|
|
||||||
## Writing a new backend
|
## Writing a new backend ##
|
||||||
|
|
||||||
Choose a name. The docs here will use `remote` as an example.
|
Choose a name. The docs here will use `remote` as an example.
|
||||||
|
|
||||||
Note that in rclone terminology a file system backend is called a
|
Note that in rclone terminology a file system backend is called a
|
||||||
remote or an fs.
|
remote or an fs.
|
||||||
|
|
||||||
### Research
|
Research
|
||||||
|
|
||||||
- Look at the interfaces defined in `fs/types.go`
|
* Look at the interfaces defined in `fs/types.go`
|
||||||
- Study one or more of the existing remotes
|
* Study one or more of the existing remotes
|
||||||
|
|
||||||
### Getting going
|
Getting going
|
||||||
|
|
||||||
- Create `backend/remote/remote.go` (copy this from a similar remote)
|
* Create `backend/remote/remote.go` (copy this from a similar remote)
|
||||||
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
|
* box is a good one to start from if you have a directory-based remote
|
||||||
- b2 is a good one to start from if you have a bucket-based remote
|
* b2 is a good one to start from if you have a bucket-based remote
|
||||||
- Add your remote to the imports in `backend/all/all.go`
|
* Add your remote to the imports in `backend/all/all.go`
|
||||||
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
|
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
|
||||||
- Try to implement as many optional methods as possible as it makes the remote more usable.
|
* Try to implement as many optional methods as possible as it makes the remote more usable.
|
||||||
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
|
||||||
- `rclone purge -v TestRemote:rclone-info`
|
* `rclone purge -v TestRemote:rclone-info`
|
||||||
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
|
||||||
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
|
||||||
- open `remote.csv` in a spreadsheet and examine
|
* open `remote.csv` in a spreadsheet and examine
|
||||||
|
|
||||||
### Guidelines for a speedy merge
|
Unit tests
|
||||||
|
|
||||||
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
|
* Create a config entry called `TestRemote` for the unit tests to use
|
||||||
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
|
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
|
||||||
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
|
* Make sure all tests pass with `go test -v`
|
||||||
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
|
|
||||||
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
|
|
||||||
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
|
|
||||||
|
|
||||||
### Unit tests
|
Integration tests
|
||||||
|
|
||||||
- Create a config entry called `TestRemote` for the unit tests to use
|
* Add your backend to `fstest/test_all/config.yaml`
|
||||||
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
|
* Once you've done that then you can use the integration test framework from the project root:
|
||||||
- Make sure all tests pass with `go test -v`
|
* go install ./...
|
||||||
|
* test_all -backends remote
|
||||||
### Integration tests
|
|
||||||
|
|
||||||
- Add your backend to `fstest/test_all/config.yaml`
|
|
||||||
- Once you've done that then you can use the integration test framework from the project root:
|
|
||||||
- go install ./...
|
|
||||||
- test_all -backends remote
|
|
||||||
|
|
||||||
Or if you want to run the integration tests manually:
|
Or if you want to run the integration tests manually:
|
||||||
|
|
||||||
- Make sure integration tests pass with
|
* Make sure integration tests pass with
|
||||||
- `cd fs/operations`
|
* `cd fs/operations`
|
||||||
- `go test -v -remote TestRemote:`
|
* `go test -v -remote TestRemote:`
|
||||||
- `cd fs/sync`
|
* `cd fs/sync`
|
||||||
- `go test -v -remote TestRemote:`
|
* `go test -v -remote TestRemote:`
|
||||||
- If your remote defines `ListR` check with this also
|
* If your remote defines `ListR` check with this also
|
||||||
- `go test -v -remote TestRemote: -fast-list`
|
* `go test -v -remote TestRemote: -fast-list`
|
||||||
|
|
||||||
See the [testing](#testing) section for more information on integration tests.
|
See the [testing](#testing) section for more information on integration tests.
|
||||||
|
|
||||||
### Backend documentation
|
Add your fs to the docs - you'll need to pick an icon for it from
|
||||||
|
|
||||||
Add your backend to the docs - you'll need to pick an icon for it from
|
|
||||||
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
|
||||||
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
alphabetical order of full name of remote (e.g. `drive` is ordered as
|
||||||
`Google Drive`) but with the local file system last.
|
`Google Drive`) but with the local file system last.
|
||||||
|
|
||||||
- `README.md` - main GitHub page
|
* `README.md` - main GitHub page
|
||||||
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
|
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
|
||||||
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
|
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
|
||||||
- update them in your backend with `bin/make_backend_docs.py remote`
|
* update them with `make backenddocs` - revert any changes in other backends
|
||||||
- `docs/content/overview.md` - overview docs
|
* `docs/content/overview.md` - overview docs
|
||||||
- `docs/content/docs.md` - list of remotes in config section
|
* `docs/content/docs.md` - list of remotes in config section
|
||||||
- `docs/content/_index.md` - front page of rclone.org
|
* `docs/content/_index.md` - front page of rclone.org
|
||||||
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
|
||||||
- `bin/make_manual.py` - add the page to the `docs` constant
|
* `bin/make_manual.py` - add the page to the `docs` constant
|
||||||
|
|
||||||
Once you've written the docs, run `make serve` and check they look OK
|
Once you've written the docs, run `make serve` and check they look OK
|
||||||
in the web browser and the links (internal and external) all work.
|
in the web browser and the links (internal and external) all work.
|
||||||
|
|
||||||
## Adding a new s3 provider
|
## Writing a plugin ##
|
||||||
|
|
||||||
It is quite easy to add a new S3 provider to rclone.
|
|
||||||
|
|
||||||
You'll need to modify the following files
|
|
||||||
|
|
||||||
- `backend/s3/s3.go`
|
|
||||||
- Add the provider to `providerOption` at the top of the file
|
|
||||||
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
|
||||||
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
|
|
||||||
- Add the provider to the `setQuirks` function - see the documentation there.
|
|
||||||
- `docs/content/s3.md`
|
|
||||||
- Add the provider at the top of the page.
|
|
||||||
- Add a section about the provider linked from there.
|
|
||||||
- Add a transcript of a trial `rclone config` session
|
|
||||||
- Edit the transcript to remove things which might change in subsequent versions
|
|
||||||
- **Do not** alter or add to the autogenerated parts of `s3.md`
|
|
||||||
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
|
|
||||||
- `README.md` - this is the home page on GitHub
|
|
||||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
|
||||||
- `docs/content/_index.md` - this is the home page of rclone.org
|
|
||||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
|
||||||
|
|
||||||
When adding the provider, endpoints, quirks, docs etc keep them in
|
|
||||||
alphabetical order by `Provider` name, but with `AWS` first and
|
|
||||||
`Other` last.
|
|
||||||
|
|
||||||
Once you've written the docs, run `make serve` and check they look OK
|
|
||||||
in the web browser and the links (internal and external) all work.
|
|
||||||
|
|
||||||
Once you've written the code, test `rclone config` works to your
|
|
||||||
satisfaction, and check the integration tests work with `go test -v -remote
|
|
||||||
NewS3Provider:`. You may need to adjust the quirks to get them to
|
|
||||||
pass. Some providers just can't pass the tests with control characters
|
|
||||||
in the names, so if these fail and the provider doesn't support
|
|
||||||
`urlEncodeListings` in the quirks then ignore them. Note that the
|
|
||||||
`SetTier` test may also fail on non-AWS providers.
|
|
||||||
|
|
||||||
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
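As a very rough sketch (not taken from that commit) of the `backend/s3/s3.go` side, adding a provider mostly means a new entry in the provider option's `Examples` plus a matching case in `setQuirks`. The provider name and help text below are invented, and the snippet only shows the shape of an `fs.OptionExample`; the real change is a struct literal edited in place, kept in alphabetical order with `AWS` first and `Other` last.

```go
// Illustrative only - "MyCloud" is a made-up provider name.
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

// myCloudExample is the shape of the new entry that would be added to
// the provider option's Examples list in backend/s3/s3.go.
var myCloudExample = fs.OptionExample{
	Value: "MyCloud",
	Help:  "MyCloud Object Storage",
}

func main() {
	fmt.Printf("provider example: %+v\n", myCloudExample)
}
```

The corresponding `setQuirks` case then switches individual quirk flags (such as `urlEncodeListings`, mentioned above) on or off for the new provider - see the comments in that function for what each flag controls.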
|
|
||||||
|
|
||||||
## Writing a plugin
|
|
||||||
|
|
||||||
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
|
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
|
||||||
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
|
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
|
||||||
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
|
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
|
||||||
|
|
||||||
### Usage
|
Usage
|
||||||
|
|
||||||
- Naming
|
- Naming
|
||||||
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
|
||||||
|
@ -560,7 +500,7 @@ This is useful if you can't merge your changes upstream or don't want to maintai
|
||||||
- Plugins must be compiled against the exact version of rclone to work.
|
- Plugins must be compiled against the exact version of rclone to work.
|
||||||
(the rclone source used to build the plugin must be the same as the rclone that loads it)
|
(the rclone source used to build the plugin must be the same as the rclone that loads it)
|
||||||
|
|
||||||
### Building
|
Building
|
||||||
|
|
||||||
To turn your existing additions into a Go plugin, move them to an external repository
|
To turn your existing additions into a Go plugin, move them to an external repository
|
||||||
and change the top-level package name to `main`.
|
and change the top-level package name to `main`.
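A minimal sketch of what the out-of-tree plugin package might look like, assuming a backend plugin, that `backend` is the KIND used in the filename, and that registration happens through the backend's `init()`; the module path is invented.

```go
// main.go of the plugin module. Its only job is to import the backend
// so that its init() registers the backend with rclone.
package main

import (
	// Hypothetical out-of-tree backend package; its init() calls fs.Register.
	_ "example.com/you/rclone-mybackend/mybackend"
)

// main is never called when the file is loaded as a plugin, but the
// package must still be package main to build with -buildmode=plugin.
func main() {}
```

It would then be built with something like `go build -buildmode=plugin -o librcloneplugin_backend_mybackend.so` against the exact rclone source the running binary was built from, as noted above.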
|
||||||
|
|
|
@ -1,9 +1,8 @@
|
||||||
FROM golang:alpine AS builder
|
FROM golang AS builder
|
||||||
|
|
||||||
COPY . /go/src/github.com/rclone/rclone/
|
COPY . /go/src/github.com/rclone/rclone/
|
||||||
WORKDIR /go/src/github.com/rclone/rclone/
|
WORKDIR /go/src/github.com/rclone/rclone/
|
||||||
|
|
||||||
RUN apk add --no-cache make bash gawk git
|
|
||||||
RUN \
|
RUN \
|
||||||
CGO_ENABLED=0 \
|
CGO_ENABLED=0 \
|
||||||
make
|
make
|
||||||
|
|
|
@ -19,10 +19,6 @@ Current active maintainers of rclone are:
|
||||||
| wiserain | @wiserain | pikpak backend |
|
| wiserain | @wiserain | pikpak backend |
|
||||||
| albertony | @albertony | |
|
| albertony | @albertony | |
|
||||||
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
|
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
|
||||||
| Hideo Aoyama | @boukendesho | snap packaging |
|
|
||||||
| nielash | @nielash | bisync |
|
|
||||||
| Dan McArdle | @dmcardle | gitannex |
|
|
||||||
| Sam Harrison | @childish-sambino | filescom |
|
|
||||||
|
|
||||||
**This is a work in progress Draft**
|
**This is a work in progress Draft**
|
||||||
|
|
||||||
|
|
MANUAL.html (generated, 48937 lines changed) - diff suppressed because it is too large
MANUAL.txt (generated, 20493 lines changed) - diff suppressed because it is too large
Makefile (50 lines changed)
|
@ -30,37 +30,29 @@ ifdef RELEASE_TAG
|
||||||
TAG := $(RELEASE_TAG)
|
TAG := $(RELEASE_TAG)
|
||||||
endif
|
endif
|
||||||
GO_VERSION := $(shell go version)
|
GO_VERSION := $(shell go version)
|
||||||
GO_OS := $(shell go env GOOS)
|
|
||||||
ifdef BETA_SUBDIR
|
ifdef BETA_SUBDIR
|
||||||
BETA_SUBDIR := /$(BETA_SUBDIR)
|
BETA_SUBDIR := /$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
|
||||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||||
BETA_UPLOAD_ROOT := beta.rclone.org:
|
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||||
ifdef GOTAGS
|
ifdef GOTAGS
|
||||||
BUILDTAGS=-tags "$(GOTAGS)"
|
BUILDTAGS=-tags "$(GOTAGS)"
|
||||||
LINTTAGS=--build-tags "$(GOTAGS)"
|
LINTTAGS=--build-tags "$(GOTAGS)"
|
||||||
endif
|
endif
|
||||||
LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"
|
|
||||||
|
|
||||||
.PHONY: rclone test_all vars version
|
.PHONY: rclone test_all vars version
|
||||||
|
|
||||||
rclone:
|
rclone:
|
||||||
ifeq ($(GO_OS),windows)
|
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
|
||||||
go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
|
|
||||||
endif
|
|
||||||
go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
|
|
||||||
ifeq ($(GO_OS),windows)
|
|
||||||
rm resource_windows_`go env GOARCH`.syso
|
|
||||||
endif
|
|
||||||
mkdir -p `go env GOPATH`/bin/
|
mkdir -p `go env GOPATH`/bin/
|
||||||
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
|
||||||
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
|
||||||
|
|
||||||
test_all:
|
test_all:
|
||||||
go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
@echo SHELL="'$(SHELL)'"
|
@echo SHELL="'$(SHELL)'"
|
||||||
|
@ -74,10 +66,6 @@ btest:
|
||||||
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
|
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
|
||||||
@echo "Copied markdown of beta release to clip board"
|
@echo "Copied markdown of beta release to clip board"
|
||||||
|
|
||||||
btesth:
|
|
||||||
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
|
|
||||||
@echo "Copied beta release in HTML to clip board"
|
|
||||||
|
|
||||||
version:
|
version:
|
||||||
@echo '$(TAG)'
|
@echo '$(TAG)'
|
||||||
|
|
||||||
|
@ -88,13 +76,13 @@ test: rclone test_all
|
||||||
|
|
||||||
# Quick test
|
# Quick test
|
||||||
quicktest:
|
quicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
|
||||||
|
|
||||||
racequicktest:
|
racequicktest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
|
||||||
|
|
||||||
compiletest:
|
compiletest:
|
||||||
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
|
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
|
||||||
|
|
||||||
# Do source code quality checks
|
# Do source code quality checks
|
||||||
check: rclone
|
check: rclone
|
||||||
|
@ -104,12 +92,16 @@ check: rclone
|
||||||
|
|
||||||
# Get the build dependencies
|
# Get the build dependencies
|
||||||
build_dep:
|
build_dep:
|
||||||
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||||
|
|
||||||
# Get the release dependencies we only install on linux
|
# Get the release dependencies we only install on linux
|
||||||
release_dep_linux:
|
release_dep_linux:
|
||||||
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
|
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
|
||||||
|
|
||||||
|
# Get the release dependencies we only install on Windows
|
||||||
|
release_dep_windows:
|
||||||
|
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
|
||||||
|
|
||||||
# Update dependencies
|
# Update dependencies
|
||||||
showupdates:
|
showupdates:
|
||||||
@echo "*** Direct dependencies that could be updated ***"
|
@echo "*** Direct dependencies that could be updated ***"
|
||||||
|
@ -154,7 +146,7 @@ rcdocs: rclone
|
||||||
|
|
||||||
install: rclone
|
install: rclone
|
||||||
install -d ${DESTDIR}/usr/bin
|
install -d ${DESTDIR}/usr/bin
|
||||||
install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin
|
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
go clean ./...
|
go clean ./...
|
||||||
|
@ -168,7 +160,7 @@ website:
|
||||||
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
|
||||||
|
|
||||||
upload_website: website
|
upload_website: website
|
||||||
rclone -v sync docs/public www.rclone.org:
|
rclone -v sync docs/public memstore:www-rclone-org
|
||||||
|
|
||||||
upload_test_website: website
|
upload_test_website: website
|
||||||
rclone -P sync docs/public test-rclone-org:
|
rclone -P sync docs/public test-rclone-org:
|
||||||
|
@ -195,8 +187,8 @@ check_sign:
|
||||||
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
||||||
|
|
||||||
upload:
|
upload:
|
||||||
rclone -P copy build/ downloads.rclone.org:/$(TAG)
|
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
|
||||||
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
|
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
|
||||||
|
|
||||||
upload_github:
|
upload_github:
|
||||||
./bin/upload-github $(TAG)
|
./bin/upload-github $(TAG)
|
||||||
|
@ -206,7 +198,7 @@ cross: doc
|
||||||
|
|
||||||
beta:
|
beta:
|
||||||
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone -v copy build/ pub.rclone.org:/$(TAG)
|
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
|
||||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
|
||||||
|
|
||||||
log_since_last_release:
|
log_since_last_release:
|
||||||
|
@ -219,18 +211,18 @@ ci_upload:
|
||||||
sudo chown -R $$USER build
|
sudo chown -R $$USER build
|
||||||
find build -type l -delete
|
find build -type l -delete
|
||||||
gzip -r9v build
|
gzip -r9v build
|
||||||
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
@echo Beta release ready at $(BETA_URL)/testbuilds
|
||||||
|
|
||||||
ci_beta:
|
ci_beta:
|
||||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
|
||||||
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||||
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
|
||||||
rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
|
@ -239,7 +231,7 @@ fetch_binaries:
|
||||||
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
|
||||||
|
|
||||||
serve: website
|
serve: website
|
||||||
cd docs && hugo server --logLevel info -w --disableFastRender
|
cd docs && hugo server -v -w --disableFastRender
|
||||||
|
|
||||||
tag: retag doc
|
tag: retag doc
|
||||||
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
|
||||||
|
|
README.md (33 lines changed)
|
@ -1,21 +1,3 @@
|
||||||
<div align="center">
|
|
||||||
<sup>Special thanks to our sponsor:</sup>
|
|
||||||
<br>
|
|
||||||
<br>
|
|
||||||
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
|
|
||||||
<div>
|
|
||||||
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
|
|
||||||
</div>
|
|
||||||
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
|
|
||||||
<div>
|
|
||||||
<sup>Visit warp.dev to learn more.</sup>
|
|
||||||
</div>
|
|
||||||
</a>
|
|
||||||
<br>
|
|
||||||
<hr>
|
|
||||||
</div>
|
|
||||||
<br>
|
|
||||||
|
|
||||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||||
|
|
||||||
|
@ -41,6 +23,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||||
|
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||||
|
@ -55,18 +38,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||||
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
|
||||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||||
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
|
||||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||||
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
|
||||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||||
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
|
||||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||||
|
@ -74,15 +53,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||||
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||||
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
|
|
||||||
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
|
|
||||||
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
|
|
||||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||||
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||||
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
|
|
||||||
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||||
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
|
||||||
|
@ -96,16 +71,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||||
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
|
||||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||||
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
|
||||||
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||||
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||||
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
|
||||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||||
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
|
||||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||||
|
@ -116,7 +87,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||||
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||||
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
|
|
||||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||||
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
|
||||||
|
@ -145,7 +115,6 @@ These backends adapt or modify other storage providers
|
||||||
* Partial syncs supported on a whole file basis
|
* Partial syncs supported on a whole file basis
|
||||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||||
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
|
|
||||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||||
* Can sync to and from network, e.g. two different cloud accounts
|
* Can sync to and from network, e.g. two different cloud accounts
|
||||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||||
|
|
RELEASE.md (110 lines changed)
|
@ -37,45 +37,16 @@ This file describes how to make the various kinds of releases
|
||||||
|
|
||||||
## Update dependencies
|
## Update dependencies
|
||||||
|
|
||||||
Early in the next release cycle update the dependencies.
|
Early in the next release cycle update the dependencies
|
||||||
|
|
||||||
* Review any pinned packages in go.mod and remove if possible
|
* Review any pinned packages in go.mod and remove if possible
|
||||||
* `make updatedirect`
|
* make updatedirect
|
||||||
* `make GOTAGS=cmount`
|
* make
|
||||||
* `make compiletest`
|
* git commit -a -v
|
||||||
* Fix anything which doesn't compile at this point and commit changes here
|
* make update
|
||||||
* `git commit -a -v -m "build: update all dependencies"`
|
* make
|
||||||
|
|
||||||
If the `make updatedirect` upgrades the version of go in the `go.mod`
|
|
||||||
then go to manual mode. `go1.20` here is the lowest supported version
|
|
||||||
in the `go.mod`.
|
|
||||||
|
|
||||||
```
|
|
||||||
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
|
|
||||||
go get -d $(cat /tmp/potential-upgrades)
|
|
||||||
go mod tidy -go=1.20 -compat=1.20
|
|
||||||
```
|
|
||||||
|
|
||||||
If the `go mod tidy` fails, use the output from it to remove the
|
|
||||||
package which can't be upgraded from `/tmp/potential-upgrades` when
|
|
||||||
done
|
|
||||||
|
|
||||||
```
|
|
||||||
git co go.mod go.sum
|
|
||||||
```
|
|
||||||
|
|
||||||
And try again.
|
|
||||||
|
|
||||||
Optionally upgrade the direct and indirect dependencies. This is very
|
|
||||||
likely to fail if the manual method was used above - in that case
|
|
||||||
ignore it as it is too time consuming to fix.
|
|
||||||
|
|
||||||
* `make update`
|
|
||||||
* `make GOTAGS=cmount`
|
|
||||||
* `make compiletest`
|
|
||||||
* roll back any updates which didn't compile
|
* roll back any updates which didn't compile
|
||||||
* `git commit -a -v --amend`
|
* git commit -a -v --amend
|
||||||
* **NB** watch out for this changing the default go version in `go.mod`
|
|
||||||
|
|
||||||
Note that `make update` updates all direct and indirect dependencies
|
Note that `make update` updates all direct and indirect dependencies
|
||||||
and there can occasionally be forwards compatibility problems with
|
and there can occasionally be forwards compatibility problems with
|
||||||
|
@ -83,9 +54,6 @@ doing that so it may be necessary to roll back dependencies to the
|
||||||
version specified by `make updatedirect` in order to get rclone to
|
version specified by `make updatedirect` in order to get rclone to
|
||||||
build.
|
build.
|
||||||
|
|
||||||
Once it compiles locally, push it on a test branch and commit fixes
|
|
||||||
until the tests pass.
|
|
||||||
|
|
||||||
## Tidy beta
|
## Tidy beta
|
||||||
|
|
||||||
At some point after the release run
|
At some point after the release run
|
||||||
|
@ -122,54 +90,34 @@ Now
|
||||||
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
|
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
|
||||||
* git push
|
* git push
|
||||||
|
|
||||||
## Sponsor logos
|
|
||||||
|
|
||||||
If updating the website note that the sponsor logos have been moved out of the main repository.
|
|
||||||
|
|
||||||
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
|
|
||||||
which is a private repo containing artwork from sponsors.
|
|
||||||
|
|
||||||
## Update the website between releases
|
|
||||||
|
|
||||||
Create an update website branch based off the last release
|
|
||||||
|
|
||||||
git co -b update-website
|
|
||||||
|
|
||||||
If the branch already exists, double check there are no commits that need saving.
|
|
||||||
|
|
||||||
Now reset the branch to the last release
|
|
||||||
|
|
||||||
git reset --hard v1.64.0
|
|
||||||
|
|
||||||
Create the changes, check them in, test with `make serve` then
|
|
||||||
|
|
||||||
make upload_test_website
|
|
||||||
|
|
||||||
Check out https://test.rclone.org and when happy
|
|
||||||
|
|
||||||
make upload_website
|
|
||||||
|
|
||||||
Cherry pick any changes back to master and the stable branch if it is active.
|
|
||||||
|
|
||||||
## Making a manual build of docker
|
## Making a manual build of docker
|
||||||
|
|
||||||
To do a basic build of rclone's docker image to debug builds locally:
|
The rclone docker image should autobuild via GitHub Actions. If it doesn't
|
||||||
|
or needs to be updated then rebuild like this.
|
||||||
|
|
||||||
|
See: https://github.com/ilteoood/docker_buildx/issues/19
|
||||||
|
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
|
||||||
|
|
||||||
```
|
```
|
||||||
docker buildx build --load -t rclone/rclone:testing --progress=plain .
|
git co v1.54.1
|
||||||
docker run --rm rclone/rclone:testing version
|
docker pull golang
|
||||||
|
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||||
|
docker buildx create --name actions_builder --use
|
||||||
|
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
|
||||||
|
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
|
||||||
|
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
|
||||||
|
echo "Supported platforms: $SUPPORTED_PLATFORMS"
|
||||||
|
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
|
||||||
|
docker buildx stop actions_builder
|
||||||
```
|
```
|
||||||
|
|
||||||
To test the multi-platform build
|
### Old build for linux/amd64 only
|
||||||
|
|
||||||
```
|
```
|
||||||
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
|
docker pull golang
|
||||||
```
|
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
|
||||||
|
docker push rclone/rclone:1.52.0
|
||||||
To make a full build, set the tags correctly and add `--push`
|
docker push rclone/rclone:1.52
|
||||||
|
docker push rclone/rclone:1
|
||||||
Note that you can't build only one architecture - you need to build them all.
|
docker push rclone/rclone:latest
|
||||||
|
|
||||||
```
|
|
||||||
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
|
|
||||||
```
|
```
|
||||||
|
|
VERSION (2 lines changed)
|
@ -1 +1 @@
|
||||||
v1.68.2
|
v1.64.0
|
||||||
|
|
|
@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
|
||||||
configfile.Install()
|
configfile.Install()
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.FileSetValue(remoteName, "type", "alias")
|
config.FileSet(remoteName, "type", "alias")
|
||||||
config.FileSetValue(remoteName, "remote", root)
|
config.FileSet(remoteName, "remote", root)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewFS(t *testing.T) {
|
func TestNewFS(t *testing.T) {
|
||||||
|
@ -81,12 +81,10 @@ func TestNewFS(t *testing.T) {
|
||||||
for i, gotEntry := range gotEntries {
|
for i, gotEntry := range gotEntries {
|
||||||
what := fmt.Sprintf("%s, entry=%d", what, i)
|
what := fmt.Sprintf("%s, entry=%d", what, i)
|
||||||
wantEntry := test.entries[i]
|
wantEntry := test.entries[i]
|
||||||
_, isDir := gotEntry.(fs.Directory)
|
|
||||||
|
|
||||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||||
if !isDir {
|
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
_, isDir := gotEntry.(fs.Directory)
|
||||||
}
|
|
||||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,8 +4,8 @@ package all
|
||||||
import (
|
import (
|
||||||
// Active file systems
|
// Active file systems
|
||||||
_ "github.com/rclone/rclone/backend/alias"
|
_ "github.com/rclone/rclone/backend/alias"
|
||||||
|
_ "github.com/rclone/rclone/backend/amazonclouddrive"
|
||||||
_ "github.com/rclone/rclone/backend/azureblob"
|
_ "github.com/rclone/rclone/backend/azureblob"
|
||||||
_ "github.com/rclone/rclone/backend/azurefiles"
|
|
||||||
_ "github.com/rclone/rclone/backend/b2"
|
_ "github.com/rclone/rclone/backend/b2"
|
||||||
_ "github.com/rclone/rclone/backend/box"
|
_ "github.com/rclone/rclone/backend/box"
|
||||||
_ "github.com/rclone/rclone/backend/cache"
|
_ "github.com/rclone/rclone/backend/cache"
|
||||||
|
@ -17,21 +17,16 @@ import (
|
||||||
_ "github.com/rclone/rclone/backend/dropbox"
|
_ "github.com/rclone/rclone/backend/dropbox"
|
||||||
_ "github.com/rclone/rclone/backend/fichier"
|
_ "github.com/rclone/rclone/backend/fichier"
|
||||||
_ "github.com/rclone/rclone/backend/filefabric"
|
_ "github.com/rclone/rclone/backend/filefabric"
|
||||||
_ "github.com/rclone/rclone/backend/filescom"
|
|
||||||
_ "github.com/rclone/rclone/backend/frostfs"
|
|
||||||
_ "github.com/rclone/rclone/backend/ftp"
|
_ "github.com/rclone/rclone/backend/ftp"
|
||||||
_ "github.com/rclone/rclone/backend/gofile"
|
|
||||||
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
_ "github.com/rclone/rclone/backend/googlephotos"
|
_ "github.com/rclone/rclone/backend/googlephotos"
|
||||||
_ "github.com/rclone/rclone/backend/hasher"
|
_ "github.com/rclone/rclone/backend/hasher"
|
||||||
_ "github.com/rclone/rclone/backend/hdfs"
|
_ "github.com/rclone/rclone/backend/hdfs"
|
||||||
_ "github.com/rclone/rclone/backend/hidrive"
|
_ "github.com/rclone/rclone/backend/hidrive"
|
||||||
_ "github.com/rclone/rclone/backend/http"
|
_ "github.com/rclone/rclone/backend/http"
|
||||||
_ "github.com/rclone/rclone/backend/imagekit"
|
|
||||||
_ "github.com/rclone/rclone/backend/internetarchive"
|
_ "github.com/rclone/rclone/backend/internetarchive"
|
||||||
_ "github.com/rclone/rclone/backend/jottacloud"
|
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||||
_ "github.com/rclone/rclone/backend/koofr"
|
_ "github.com/rclone/rclone/backend/koofr"
|
||||||
_ "github.com/rclone/rclone/backend/linkbox"
|
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
_ "github.com/rclone/rclone/backend/mailru"
|
_ "github.com/rclone/rclone/backend/mailru"
|
||||||
_ "github.com/rclone/rclone/backend/mega"
|
_ "github.com/rclone/rclone/backend/mega"
|
||||||
|
@ -42,12 +37,10 @@ import (
|
||||||
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
|
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
|
||||||
_ "github.com/rclone/rclone/backend/pcloud"
|
_ "github.com/rclone/rclone/backend/pcloud"
|
||||||
_ "github.com/rclone/rclone/backend/pikpak"
|
_ "github.com/rclone/rclone/backend/pikpak"
|
||||||
_ "github.com/rclone/rclone/backend/pixeldrain"
|
|
||||||
_ "github.com/rclone/rclone/backend/premiumizeme"
|
_ "github.com/rclone/rclone/backend/premiumizeme"
|
||||||
_ "github.com/rclone/rclone/backend/protondrive"
|
_ "github.com/rclone/rclone/backend/protondrive"
|
||||||
_ "github.com/rclone/rclone/backend/putio"
|
_ "github.com/rclone/rclone/backend/putio"
|
||||||
_ "github.com/rclone/rclone/backend/qingstor"
|
_ "github.com/rclone/rclone/backend/qingstor"
|
||||||
_ "github.com/rclone/rclone/backend/quatrix"
|
|
||||||
_ "github.com/rclone/rclone/backend/s3"
|
_ "github.com/rclone/rclone/backend/s3"
|
||||||
_ "github.com/rclone/rclone/backend/seafile"
|
_ "github.com/rclone/rclone/backend/seafile"
|
||||||
_ "github.com/rclone/rclone/backend/sftp"
|
_ "github.com/rclone/rclone/backend/sftp"
|
||||||
|
@ -57,7 +50,6 @@ import (
|
||||||
_ "github.com/rclone/rclone/backend/storj"
|
_ "github.com/rclone/rclone/backend/storj"
|
||||||
_ "github.com/rclone/rclone/backend/sugarsync"
|
_ "github.com/rclone/rclone/backend/sugarsync"
|
||||||
_ "github.com/rclone/rclone/backend/swift"
|
_ "github.com/rclone/rclone/backend/swift"
|
||||||
_ "github.com/rclone/rclone/backend/ulozto"
|
|
||||||
_ "github.com/rclone/rclone/backend/union"
|
_ "github.com/rclone/rclone/backend/union"
|
||||||
_ "github.com/rclone/rclone/backend/uptobox"
|
_ "github.com/rclone/rclone/backend/uptobox"
|
||||||
_ "github.com/rclone/rclone/backend/webdav"
|
_ "github.com/rclone/rclone/backend/webdav"
|
||||||
|
|
backend/amazonclouddrive/amazonclouddrive.go (new file, 1369 lines) - diff suppressed because it is too large
backend/amazonclouddrive/amazonclouddrive_test.go (new file, 21 lines)
|
@ -0,0 +1,21 @@
|
||||||
|
// Test AmazonCloudDrive filesystem interface
|
||||||
|
|
||||||
|
//go:build acd
|
||||||
|
// +build acd
|
||||||
|
|
||||||
|
package amazonclouddrive_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/backend/amazonclouddrive"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote
|
||||||
|
func TestIntegration(t *testing.T) {
|
||||||
|
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||||
|
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||||
|
fstests.Run(t)
|
||||||
|
}
|
|
@ -1,13 +1,14 @@
|
||||||
//go:build !plan9 && !solaris && !js
|
//go:build !plan9 && !solaris && !js
|
||||||
|
// +build !plan9,!solaris,!js
|
||||||
|
|
||||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -17,7 +18,6 @@ import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -33,6 +33,7 @@ import (
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/chunksize"
|
"github.com/rclone/rclone/fs/chunksize"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
@ -45,8 +46,10 @@ import (
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/env"
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/multipart"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
"github.com/rclone/rclone/lib/pool"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -67,16 +70,12 @@ const (
|
||||||
emulatorAccount = "devstoreaccount1"
|
emulatorAccount = "devstoreaccount1"
|
||||||
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||||
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
||||||
|
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||||
|
memoryPoolUseMmap = false
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
|
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
|
||||||
|
|
||||||
// Take this when changing or reading metadata.
|
|
||||||
//
|
|
||||||
// It acts as global metadata lock so we don't bloat Object
|
|
||||||
// with an extra lock that will only very rarely be contended.
|
|
||||||
metadataMu sync.Mutex
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
|
@ -295,10 +294,10 @@ avoid the time out.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "access_tier",
|
Name: "access_tier",
|
||||||
Help: `Access tier of blob: hot, cool, cold or archive.
|
Help: `Access tier of blob: hot, cool or archive.
|
||||||
|
|
||||||
Archived blobs can be restored by setting access tier to hot, cool or
|
Archived blobs can be restored by setting access tier to hot or
|
||||||
cold. Leave blank if you intend to use default access tier, which is
|
cool. Leave blank if you intend to use default access tier, which is
|
||||||
set at account level
|
set at account level
|
||||||
|
|
||||||
If there is no "access tier" specified, rclone doesn't apply any tier.
|
If there is no "access tier" specified, rclone doesn't apply any tier.
|
||||||
|
@ -306,7 +305,7 @@ rclone performs "Set Tier" operation on blobs while uploading, if objects
|
||||||
are not modified, specifying "access tier" to new one will have no effect.
|
are not modified, specifying "access tier" to new one will have no effect.
|
||||||
If blobs are in "archive tier" at remote, trying to perform data transfer
|
If blobs are in "archive tier" at remote, trying to perform data transfer
|
||||||
operations from remote will not be allowed. User should first restore by
|
operations from remote will not be allowed. User should first restore by
|
||||||
tiering blob to "Hot", "Cool" or "Cold".`,
|
tiering blob to "Hot" or "Cool".`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "archive_tier_delete",
|
Name: "archive_tier_delete",
|
||||||
|
@ -338,16 +337,17 @@ to start uploading.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "memory_pool_flush_time",
|
Name: "memory_pool_flush_time",
|
||||||
Default: fs.Duration(time.Minute),
|
Default: memoryPoolFlushTime,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideBoth,
|
Help: `How often internal memory buffer pools will be flushed.
|
||||||
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
|
|
||||||
|
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
||||||
|
This option controls how often unused buffers will be removed from the pool.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "memory_pool_use_mmap",
|
Name: "memory_pool_use_mmap",
|
||||||
Default: false,
|
Default: memoryPoolUseMmap,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideBoth,
|
Help: `Whether to use mmap buffers in internal memory pool.`,
|
||||||
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
|
@ -401,24 +401,6 @@ rclone does if you know the container exists already.
|
||||||
Help: `If set, do not do HEAD before GET when getting objects.`,
|
Help: `If set, do not do HEAD before GET when getting objects.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "delete_snapshots",
|
|
||||||
Help: `Set to specify how to deal with snapshots on blob deletion.`,
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "",
|
|
||||||
Help: "By default, the delete operation fails if a blob has snapshots",
|
|
||||||
}, {
|
|
||||||
Value: string(blob.DeleteSnapshotsOptionTypeInclude),
|
|
||||||
Help: "Specify 'include' to remove the root blob and all its snapshots",
|
|
||||||
}, {
|
|
||||||
Value: string(blob.DeleteSnapshotsOptionTypeOnly),
|
|
||||||
Help: "Specify 'only' to remove only the snapshots but keep the root blob.",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Default: "",
|
|
||||||
Exclusive: true,
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -450,12 +432,13 @@ type Options struct {
|
||||||
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||||
UseEmulator bool `config:"use_emulator"`
|
UseEmulator bool `config:"use_emulator"`
|
||||||
DisableCheckSum bool `config:"disable_checksum"`
|
DisableCheckSum bool `config:"disable_checksum"`
|
||||||
|
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||||
|
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
PublicAccess string `config:"public_access"`
|
PublicAccess string `config:"public_access"`
|
||||||
DirectoryMarkers bool `config:"directory_markers"`
|
DirectoryMarkers bool `config:"directory_markers"`
|
||||||
NoCheckContainer bool `config:"no_check_container"`
|
NoCheckContainer bool `config:"no_check_container"`
|
||||||
NoHeadObject bool `config:"no_head_object"`
|
NoHeadObject bool `config:"no_head_object"`
|
||||||
DeleteSnapshots string `config:"delete_snapshots"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote azure server
|
// Fs represents a remote azure server
|
||||||
|
@ -474,6 +457,8 @@ type Fs struct {
|
||||||
cache *bucket.Cache // cache for container creation status
|
cache *bucket.Cache // cache for container creation status
|
||||||
pacer *fs.Pacer // To pace and retry the API calls
|
pacer *fs.Pacer // To pace and retry the API calls
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
|
pool *pool.Pool // memory pool
|
||||||
|
poolSize int64 // size of pages in memory pool
|
||||||
publicAccess container.PublicAccessType // Container Public Access Level
|
publicAccess container.PublicAccessType // Container Public Access Level
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -486,7 +471,7 @@ type Object struct {
|
||||||
size int64 // Size of the object
|
size int64 // Size of the object
|
||||||
mimeType string // Content-Type of the object
|
mimeType string // Content-Type of the object
|
||||||
accessTier blob.AccessTier // Blob Access Tier
|
accessTier blob.AccessTier // Blob Access Tier
|
||||||
meta map[string]string // blob metadata - take metadataMu when accessing
|
meta map[string]string // blob metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
@ -539,7 +524,6 @@ func (o *Object) split() (container, containerPath string) {
|
||||||
func validateAccessTier(tier string) bool {
|
func validateAccessTier(tier string) bool {
|
||||||
return strings.EqualFold(tier, string(blob.AccessTierHot)) ||
|
return strings.EqualFold(tier, string(blob.AccessTierHot)) ||
|
||||||
strings.EqualFold(tier, string(blob.AccessTierCool)) ||
|
strings.EqualFold(tier, string(blob.AccessTierCool)) ||
|
||||||
strings.EqualFold(tier, string(blob.AccessTierCold)) ||
|
|
||||||
strings.EqualFold(tier, string(blob.AccessTierArchive))
|
strings.EqualFold(tier, string(blob.AccessTierArchive))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -669,8 +653,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||||
if opt.AccessTier == "" {
|
if opt.AccessTier == "" {
|
||||||
opt.AccessTier = string(defaultAccessTier)
|
opt.AccessTier = string(defaultAccessTier)
|
||||||
} else if !validateAccessTier(opt.AccessTier) {
|
} else if !validateAccessTier(opt.AccessTier) {
|
||||||
return nil, fmt.Errorf("supported access tiers are %s, %s, %s and %s",
|
return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
|
||||||
string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierCold), string(blob.AccessTierArchive))
|
string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierArchive))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !validatePublicAccess((opt.PublicAccess)) {
|
if !validatePublicAccess((opt.PublicAccess)) {
|
||||||
|
@ -687,6 +671,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||||
cache: bucket.NewCache(),
|
cache: bucket.NewCache(),
|
||||||
cntSVCcache: make(map[string]*container.Client, 1),
|
cntSVCcache: make(map[string]*container.Client, 1),
|
||||||
|
pool: pool.New(
|
||||||
|
time.Duration(opt.MemoryPoolFlushTime),
|
||||||
|
int(opt.ChunkSize),
|
||||||
|
ci.Transfers,
|
||||||
|
opt.MemoryPoolUseMmap,
|
||||||
|
),
|
||||||
|
poolSize: int64(opt.ChunkSize),
|
||||||
}
|
}
|
||||||
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
|
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
|
||||||
f.setRoot(root)
|
f.setRoot(root)
|
||||||
|
@@ -711,11 +702,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         ClientOptions: policyClientOptions,
     }
 
-    // Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
+    // Here we auth by setting one of cred, sharedKeyCred or f.svc
     var (
         cred          azcore.TokenCredential
         sharedKeyCred *service.SharedKeyCredential
-        anonymous     = false
     )
     switch {
     case opt.EnvAuth:
@@ -875,9 +865,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         if err != nil {
             return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
         }
-    case opt.Account != "":
-        // Anonymous access
-        anonymous = true
     default:
         return nil, errors.New("no authentication method configured")
     }
@@ -907,12 +894,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         if err != nil {
             return nil, fmt.Errorf("create client failed: %w", err)
         }
-    } else if anonymous {
-        // Anonymous public access
-        f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
-        if err != nil {
-            return nil, fmt.Errorf("create public client failed: %w", err)
-        }
     }
     }
     if f.svc == nil {
@@ -991,9 +972,6 @@ func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client
 
 // updateMetadataWithModTime adds the modTime passed in to o.meta.
 func (o *Object) updateMetadataWithModTime(modTime time.Time) {
-    metadataMu.Lock()
-    defer metadataMu.Unlock()
-
     // Make sure o.meta is not nil
     if o.meta == nil {
         o.meta = make(map[string]string, 1)
@@ -1098,7 +1076,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
         isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
         if isDirectory {
             // Don't insert the root directory
-            if remote == f.opt.Enc.ToStandardPath(directory) {
+            if remote == directory {
                 continue
             }
             // process directory markers as directories
@@ -1525,7 +1503,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     container, directory := f.split(dir)
     // Remove directory marker file
-    if f.opt.DirectoryMarkers && container != "" && directory != "" {
+    if f.opt.DirectoryMarkers && container != "" && dir != "" {
         o := &Object{
             fs:     f,
             remote: dir + "/",
@@ -1559,10 +1537,7 @@ func (f *Fs) Hashes() hash.Set {
 // Purge deletes all the files and directories including the old versions.
 func (f *Fs) Purge(ctx context.Context, dir string) error {
     container, directory := f.split(dir)
-    if container == "" {
-        return errors.New("can't purge from root")
-    }
-    if directory != "" {
+    if container == "" || directory != "" {
         // Delegate to caller if not root of a container
         return fs.ErrorCantPurge
     }
@@ -1619,6 +1594,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     return f.NewObject(ctx, remote)
 }
 
+func (f *Fs) getMemoryPool(size int64) *pool.Pool {
+    if size == int64(f.opt.ChunkSize) {
+        return f.pool
+    }
+
+    return pool.New(
+        time.Duration(f.opt.MemoryPoolFlushTime),
+        int(size),
+        f.ci.Transfers,
+        f.opt.MemoryPoolUseMmap,
+    )
+}
+
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs
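The getMemoryPool helper added in this hunk returns the shared chunk-sized pool, or builds a throwaway pool when an unusual size is requested. For orientation, here is a minimal, self-contained sketch of the lib/pool API it relies on; the constructor argument order follows the calls in this diff, and the sizes below are invented purely for illustration.

package main

// Sketch only: demonstrates borrowing and returning a buffer from
// github.com/rclone/rclone/lib/pool, as the upload loop below does.
import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	// A pool of 4 MiB buffers, with at most 8 cached, flushed after a
	// minute of inactivity, without mmap (illustrative values).
	p := pool.New(time.Minute, 4*1024*1024, 8, false)

	buf := p.Get() // borrow one buffer
	fmt.Println("buffer size:", len(buf))
	p.Put(buf) // return it so it can be reused
}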
@@ -1662,9 +1650,6 @@ func (o *Object) Size() int64 {
 
 // Set o.metadata from metadata
 func (o *Object) setMetadata(metadata map[string]*string) {
-    metadataMu.Lock()
-    defer metadataMu.Unlock()
-
     if len(metadata) > 0 {
         // Lower case the metadata
         o.meta = make(map[string]string, len(metadata))
@@ -1689,9 +1674,6 @@ func (o *Object) setMetadata(metadata map[string]*string) {
 
 // Get metadata from o.meta
 func (o *Object) getMetadata() (metadata map[string]*string) {
-    metadataMu.Lock()
-    defer metadataMu.Unlock()
-
     if len(o.meta) == 0 {
         return nil
     }
@@ -1903,7 +1885,12 @@ func (o *Object) ModTime(ctx context.Context) (result time.Time) {
 
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-    o.updateMetadataWithModTime(modTime)
+    // Make sure o.meta is not nil
+    if o.meta == nil {
+        o.meta = make(map[string]string, 1)
+    }
+    // Set modTimeKey in it
+    o.meta[modTimeKey] = modTime.Format(timeFormatOut)
 
     blb := o.getBlobSVC()
     opt := blob.SetMetadataOptions{}
@@ -1929,7 +1916,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     var offset int64
     var count int64
     if o.AccessTier() == blob.AccessTierArchive {
-        return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot, cool, cold first")
+        return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
     }
     fs.FixRangeOption(options, o.size)
     for _, option := range options {
@@ -1995,42 +1982,34 @@ func (rs *readSeekCloser) Close() error {
     return nil
 }
 
-// record chunk number and id for Close
-type azBlock struct {
-    chunkNumber uint64
-    id          string
+// increment the slice passed in as LSB binary
+func increment(xs []byte) {
+    for i, digit := range xs {
+        newDigit := digit + 1
+        xs[i] = newDigit
+        if newDigit >= digit {
+            // exit if no carry
+            break
+        }
+    }
 }
 
-// Implements the fs.ChunkWriter interface
-type azChunkWriter struct {
-    chunkSize int64
-    size      int64
-    f         *Fs
-    ui        uploadInfo
-    blocksMu  sync.Mutex // protects the below
-    blocks    []azBlock  // list of blocks for finalize
-    o         *Object
-}
+var warnStreamUpload sync.Once
 
-// OpenChunkWriter returns the chunk size and a ChunkWriter
+// uploadMultipart uploads a file using multipart upload
 //
-// Pass in the remote and the src object
-// You can also use options to hint at the desired chunk size
-func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
-    // Temporary Object under construction
-    o := &Object{
-        fs:     f,
-        remote: remote,
-    }
-    ui, err := o.prepareUpload(ctx, src, options)
-    if err != nil {
-        return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
-    }
-
+// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
     // Calculate correct partSize
-    partSize := f.opt.ChunkSize
+    partSize := o.fs.opt.ChunkSize
     totalParts := -1
-    size := src.Size()
+
+    // make concurrency machinery
+    concurrency := o.fs.opt.UploadConcurrency
+    if concurrency < 1 {
+        concurrency = 1
+    }
+    tokens := pacer.NewTokenDispenser(concurrency)
 
     // Note that the max size of file is 4.75 TB (100 MB X 50,000
     // blocks) and this is bigger than the max uncommitted block
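The increment helper introduced above treats an 8-byte slice as a little-endian counter so that each staged block gets a distinct, equal-length base64 block ID. A self-contained sketch of that pattern follows; it illustrates the idea only and is not the rclone code itself.

package main

import (
	"encoding/base64"
	"fmt"
)

// bump treats id as a little-endian counter, the same idea as the
// increment helper in the hunk above.
func bump(id []byte) {
	for i := range id {
		id[i]++
		if id[i] != 0 { // no carry, stop
			return
		}
	}
}

func main() {
	id := make([]byte, 8) // block counter, LSB first
	for part := 0; part < 3; part++ {
		bump(id)
		// Azure block IDs must be base64 strings of equal length within a blob.
		fmt.Println(base64.StdEncoding.EncodeToString(id))
	}
}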
@@ -2044,13 +2023,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
     // 195GB which seems like a not too unreasonable limit.
     if size == -1 {
         warnStreamUpload.Do(func() {
-            fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
-                f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
+            fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
+                o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
         })
     } else {
-        partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize)
+        partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
         if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
-            return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
+            return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
         }
         totalParts = int(fs.SizeSuffix(size) / partSize)
         if fs.SizeSuffix(size)%partSize != 0 {
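Both sides of this hunk grow the part size when the file would not fit into blockblob.MaxBlocks chunks of the configured size. The following is a rough, self-contained sketch of that arithmetic; it illustrates the idea and is not the chunksize.Calculator implementation, which also rounds and logs.

package main

import "fmt"

// partSizeFor grows chunkSize until size fits into at most maxBlocks chunks.
func partSizeFor(size, chunkSize, maxBlocks int64) int64 {
	for size > chunkSize*maxBlocks {
		chunkSize *= 2
	}
	return chunkSize
}

func main() {
	const mib = int64(1 << 20)
	// A 100 GiB upload fits into 50,000 blocks of 4 MiB, so the chunk size is kept.
	fmt.Println(partSizeFor(100*1024*mib, 4*mib, 50000)/mib, "MiB")
	// A 300 TiB request needs much bigger chunks and would eventually hit the
	// per-block limit checked in the code above.
	fmt.Println(partSizeFor(300*1024*1024*mib, 4*mib, 50000)/mib, "MiB")
}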
@@ -2060,275 +2039,173 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
     fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
 
-    chunkWriter := &azChunkWriter{
-        chunkSize: int64(partSize),
-        size:      size,
-        f:         f,
-        ui:        ui,
-        o:         o,
-    }
-    info = fs.ChunkWriterInfo{
-        ChunkSize:   int64(partSize),
-        Concurrency: o.fs.opt.UploadConcurrency,
-        //LeavePartsOnError: o.fs.opt.LeavePartsOnError,
-    }
-    fs.Debugf(o, "open chunk writer: started multipart upload")
-    return info, chunkWriter, nil
-}
-
-// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
-func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
-    if chunkNumber < 0 {
-        err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
-        return -1, err
-    }
-
-    // Upload the block, with MD5 for check
-    m := md5.New()
-    currentChunkSize, err := io.Copy(m, reader)
-    if err != nil {
-        return -1, err
-    }
-    // If no data read, don't write the chunk
-    if currentChunkSize == 0 {
-        return 0, nil
-    }
-    md5sum := m.Sum(nil)
-
-    // increment the blockID and save the blocks for finalize
-    var binaryBlockID [8]byte // block counter as LSB first 8 bytes
-    binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
-    blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])
-
-    // Save the blockID for the commit
-    w.blocksMu.Lock()
-    w.blocks = append(w.blocks, azBlock{
-        chunkNumber: uint64(chunkNumber),
-        id:          blockID,
-    })
-    w.blocksMu.Unlock()
-
-    err = w.f.pacer.Call(func() (bool, error) {
-        // rewind the reader on retry and after reading md5
-        _, err = reader.Seek(0, io.SeekStart)
-        if err != nil {
-            return false, err
-        }
-        options := blockblob.StageBlockOptions{
-            // Specify the transactional md5 for the body, to be validated by the service.
-            TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
-        }
-        _, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
-        if err != nil {
-            if chunkNumber <= 8 {
-                return w.f.shouldRetry(ctx, err)
-            }
-            // retry all chunks once have done the first few
-            return true, err
-        }
-        return false, nil
-    })
-    if err != nil {
-        return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
-    }
-    fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize)
-    return currentChunkSize, err
-}
-
-// Abort the multipart upload.
-//
-// FIXME it would be nice to delete uncommitted blocks.
-//
-// See: https://github.com/rclone/rclone/issues/5583
-//
-// However there doesn't seem to be an easy way of doing this other than
-// by deleting the target.
-//
-// This means that a failed upload deletes the target which isn't ideal.
-//
-// Uploading a zero length blob and deleting it will remove the
-// uncommitted blocks I think.
-//
-// Could check to see if a file exists already and if it doesn't then
-// create a 0 length file and delete it to flush the uncommitted
-// blocks.
-//
-// This is what azcopy does
-// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
-func (w *azChunkWriter) Abort(ctx context.Context) error {
-    fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
-    return nil
-}
-
-// Close and finalise the multipart upload
-func (w *azChunkWriter) Close(ctx context.Context) (err error) {
-    // sort the completed parts by part number
-    sort.Slice(w.blocks, func(i, j int) bool {
-        return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
-    })
-
-    // Create and check a list of block IDs
-    blockIDs := make([]string, len(w.blocks))
-    for i := range w.blocks {
-        if w.blocks[i].chunkNumber != uint64(i) {
-            return fmt.Errorf("internal error: expecting chunkNumber %d but got %d", i, w.blocks[i].chunkNumber)
-        }
-        chunkBytes, err := base64.StdEncoding.DecodeString(w.blocks[i].id)
-        if err != nil {
-            return fmt.Errorf("internal error: bad block ID: %w", err)
-        }
-        chunkNumber := binary.LittleEndian.Uint64(chunkBytes)
-        if w.blocks[i].chunkNumber != chunkNumber {
-            return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", w.blocks[i].chunkNumber, chunkNumber)
-        }
-        blockIDs[i] = w.blocks[i].id
-    }
-
-    options := blockblob.CommitBlockListOptions{
-        Metadata:    w.o.getMetadata(),
-        Tier:        parseTier(w.f.opt.AccessTier),
-        HTTPHeaders: &w.ui.httpHeaders,
-    }
-
-    // Finalise the upload session
-    err = w.f.pacer.Call(func() (bool, error) {
-        _, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options)
-        return w.f.shouldRetry(ctx, err)
-    })
-    if err != nil {
-        return fmt.Errorf("failed to complete multipart upload: %w", err)
-    }
-    fs.Debugf(w.o, "multipart upload finished")
-    return err
-}
-
-var warnStreamUpload sync.Once
-
-// uploadMultipart uploads a file using multipart upload
-//
-// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
-    chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
-        Open:        o.fs,
-        OpenOptions: options,
-    })
-    if err != nil {
-        return ui, err
-    }
-    return chunkWriter.(*azChunkWriter).ui, nil
-}
+    // unwrap the accounting from the input, we use wrap to put it
+    // back on after the buffering
+    in, wrap := accounting.UnWrap(in)
+
+    // FIXME it would be nice to delete uncommitted blocks
+    // See: https://github.com/rclone/rclone/issues/5583
+    //
+    // However there doesn't seem to be an easy way of doing this other than
+    // by deleting the target.
+    //
+    // This means that a failed upload deletes the target which isn't ideal.
+    //
+    // Uploading a zero length blob and deleting it will remove the
+    // uncommitted blocks I think.
+    //
+    // Could check to see if a file exists already and if it
+    // doesn't then create a 0 length file and delete it to flush
+    // the uncommitted blocks.
+    //
+    // This is what azcopy does
+    // https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
+    // defer atexit.OnError(&err, func() {
+    //     fs.Debugf(o, "Cancelling multipart upload")
+    //     // Code goes here!
+    // })()
+
+    // Upload the chunks
+    var (
+        g, gCtx       = errgroup.WithContext(ctx)
+        remaining     = fs.SizeSuffix(size)                 // remaining size in file for logging only, -1 if size < 0
+        position      = fs.SizeSuffix(0)                    // position in file
+        memPool       = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
+        finished      = false                               // set when we have read EOF
+        blocks        []string                              // list of blocks for finalize
+        binaryBlockID = make([]byte, 8)                     // block counter as LSB first 8 bytes
+    )
+    for part := 0; !finished; part++ {
+        // Get a block of memory from the pool and a token which limits concurrency
+        tokens.Get()
+        buf := memPool.Get()
+
+        free := func() {
+            memPool.Put(buf) // return the buf
+            tokens.Put()     // return the token
+        }
+
+        // Fail fast, in case an errgroup managed function returns an error
+        // gCtx is cancelled. There is no point in uploading all the other parts.
+        if gCtx.Err() != nil {
+            free()
+            break
+        }
+
+        // Read the chunk
+        n, err := readers.ReadFill(in, buf) // this can never return 0, nil
+        if err == io.EOF {
+            if n == 0 { // end if no data
+                free()
+                break
+            }
+            finished = true
+        } else if err != nil {
+            free()
+            return fmt.Errorf("multipart upload failed to read source: %w", err)
+        }
+        buf = buf[:n]
+
+        // increment the blockID and save the blocks for finalize
+        increment(binaryBlockID)
+        blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
+        blocks = append(blocks, blockID)
+
+        // Transfer the chunk
+        fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
+        g.Go(func() (err error) {
+            defer free()
+
+            // Upload the block, with MD5 for check
+            md5sum := md5.Sum(buf)
+            transactionalMD5 := md5sum[:]
+            err = o.fs.pacer.Call(func() (bool, error) {
+                bufferReader := bytes.NewReader(buf)
+                wrappedReader := wrap(bufferReader)
+                rs := readSeekCloser{wrappedReader, bufferReader}
+                options := blockblob.StageBlockOptions{
+                    // Specify the transactional md5 for the body, to be validated by the service.
+                    TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
+                }
+                _, err = blb.StageBlock(ctx, blockID, &rs, &options)
+                return o.fs.shouldRetry(ctx, err)
+            })
+            if err != nil {
+                return fmt.Errorf("multipart upload failed to upload part: %w", err)
+            }
+            return nil
+        })
+
+        // ready for next block
+        if size >= 0 {
+            remaining -= partSize
+        }
+        position += partSize
+    }
+    err = g.Wait()
+    if err != nil {
+        return err
+    }
+
+    options := blockblob.CommitBlockListOptions{
+        Metadata:    o.getMetadata(),
+        Tier:        parseTier(o.fs.opt.AccessTier),
+        HTTPHeaders: httpHeaders,
+    }
+
+    // Finalise the upload session
+    err = o.fs.pacer.Call(func() (bool, error) {
+        _, err := blb.CommitBlockList(ctx, blocks, &options)
+        return o.fs.shouldRetry(ctx, err)
+    })
+    if err != nil {
+        return fmt.Errorf("multipart upload failed to finalize: %w", err)
+    }
+    return nil
+}
 
 // uploadSinglepart uploads a short blob using a single part upload
-func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) {
-    chunkSize := int64(o.fs.opt.ChunkSize)
+func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
     // fs.Debugf(o, "Single part upload starting of object %d bytes", size)
-    if size > chunkSize || size < 0 {
-        return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize)
+    if size > o.fs.poolSize || size < 0 {
+        return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
     }
 
-    rw := multipart.NewRW()
-    defer fs.CheckClose(rw, &err)
+    buf := o.fs.pool.Get()
+    defer o.fs.pool.Put(buf)
 
-    n, err := io.CopyN(rw, in, size+1)
+    n, err := readers.ReadFill(in, buf)
+    if err == nil {
+        // Check to see whether in is exactly len(buf) or bigger
+        var buf2 = []byte{0}
+        n2, err2 := readers.ReadFill(in, buf2)
+        if n2 != 0 || err2 != io.EOF {
+            return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
+        }
+    }
     if err != nil && err != io.EOF {
         return fmt.Errorf("single part upload read failed: %w", err)
     }
-    if n != size {
+    if int64(n) != size {
         return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
     }
 
-    rs := &readSeekCloser{Reader: rw, Seeker: rw}
+    b := bytes.NewReader(buf[:n])
+    rs := &readSeekCloser{Reader: b, Seeker: b}
 
     options := blockblob.UploadOptions{
         Metadata:    o.getMetadata(),
         Tier:        parseTier(o.fs.opt.AccessTier),
-        HTTPHeaders: &ui.httpHeaders,
+        HTTPHeaders: httpHeaders,
     }
 
-    return o.fs.pacer.Call(func() (bool, error) {
-        // rewind the reader on retry
-        _, err = rs.Seek(0, io.SeekStart)
-        if err != nil {
-            return false, err
-        }
-        _, err = ui.blb.Upload(ctx, rs, &options)
+    // Don't retry, return a retry error instead
+    return o.fs.pacer.CallNoRetry(func() (bool, error) {
+        _, err = blb.Upload(ctx, rs, &options)
         return o.fs.shouldRetry(ctx, err)
     })
 }
 
-// Info needed for an upload
-type uploadInfo struct {
-    blb         *blockblob.Client
-    httpHeaders blob.HTTPHeaders
-    isDirMarker bool
-}
-
-// Prepare the object for upload
-func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
-    container, containerPath := o.split()
-    if container == "" || containerPath == "" {
-        return ui, fmt.Errorf("can't upload to root - need a container")
-    }
-    // Create parent dir/bucket if not saving directory marker
-    metadataMu.Lock()
-    _, ui.isDirMarker = o.meta[dirMetaKey]
-    metadataMu.Unlock()
-    if !ui.isDirMarker {
-        err = o.fs.mkdirParent(ctx, o.remote)
-        if err != nil {
-            return ui, err
-        }
-    }
-
-    // Update Mod time
-    o.updateMetadataWithModTime(src.ModTime(ctx))
-    if err != nil {
-        return ui, err
-    }
-
-    // Create the HTTP headers for the upload
-    ui.httpHeaders = blob.HTTPHeaders{
-        BlobContentType: pString(fs.MimeType(ctx, src)),
-    }
-
-    // Compute the Content-MD5 of the file. As we stream all uploads it
-    // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
-    if !o.fs.opt.DisableCheckSum {
-        if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
-            sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-            if err == nil {
-                ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
-            } else {
-                fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-            }
-        }
-    }
-
-    // Apply upload options (also allows one to overwrite content-type)
-    for _, option := range options {
-        key, value := option.Header()
-        lowerKey := strings.ToLower(key)
-        switch lowerKey {
-        case "":
-            // ignore
-        case "cache-control":
-            ui.httpHeaders.BlobCacheControl = pString(value)
-        case "content-disposition":
-            ui.httpHeaders.BlobContentDisposition = pString(value)
-        case "content-encoding":
-            ui.httpHeaders.BlobContentEncoding = pString(value)
-        case "content-language":
-            ui.httpHeaders.BlobContentLanguage = pString(value)
-        case "content-type":
-            ui.httpHeaders.BlobContentType = pString(value)
-        }
-    }
-
-    ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
-    return ui, nil
-}
-
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
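The new upload loop above bounds concurrency with a token dispenser and collects worker errors through an errgroup, breaking out of the scheduling loop once a worker has failed. A stripped-down, self-contained sketch of that pattern follows; it is an illustration only, and the real loop additionally manages the buffer pool, pacing and retries.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	tokens := make(chan struct{}, 4) // at most 4 uploads in flight

	for part := 0; part < 10; part++ {
		if gCtx.Err() != nil { // a worker failed, stop scheduling more parts
			break
		}
		part := part
		tokens <- struct{}{} // acquire a slot
		g.Go(func() error {
			defer func() { <-tokens }() // release the slot
			// ... stage block `part` here ...
			fmt.Println("uploaded part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}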
@@ -2344,26 +2221,80 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
             return errCantUpdateArchiveTierBlobs
         }
     }
-    size := src.Size()
-    multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize)
-    var ui uploadInfo
-    if multipartUpload {
-        ui, err = o.uploadMultipart(ctx, in, src, options...)
-    } else {
-        ui, err = o.prepareUpload(ctx, src, options)
-        if err != nil {
-            return fmt.Errorf("failed to prepare upload: %w", err)
-        }
-        err = o.uploadSinglepart(ctx, in, size, ui)
+    container, containerPath := o.split()
+    if container == "" || containerPath == "" {
+        return fmt.Errorf("can't upload to root - need a container")
+    }
+    // Create parent dir/bucket if not saving directory marker
+    _, isDirMarker := o.meta[dirMetaKey]
+    if !isDirMarker {
+        err = o.fs.mkdirParent(ctx, o.remote)
+        if err != nil {
+            return err
+        }
+    }
+
+    // Update Mod time
+    fs.Debugf(nil, "o.meta = %+v", o.meta)
+    o.updateMetadataWithModTime(src.ModTime(ctx))
+    if err != nil {
+        return err
+    }
+
+    // Create the HTTP headers for the upload
+    httpHeaders := blob.HTTPHeaders{
+        BlobContentType: pString(fs.MimeType(ctx, src)),
+    }
+
+    // Compute the Content-MD5 of the file. As we stream all uploads it
+    // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
+    if !o.fs.opt.DisableCheckSum {
+        if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
+            sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+            if err == nil {
+                httpHeaders.BlobContentMD5 = sourceMD5bytes
+            } else {
+                fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+            }
+        }
+    }
+
+    // Apply upload options (also allows one to overwrite content-type)
+    for _, option := range options {
+        key, value := option.Header()
+        lowerKey := strings.ToLower(key)
+        switch lowerKey {
+        case "":
+            // ignore
+        case "cache-control":
+            httpHeaders.BlobCacheControl = pString(value)
+        case "content-disposition":
+            httpHeaders.BlobContentDisposition = pString(value)
+        case "content-encoding":
+            httpHeaders.BlobContentEncoding = pString(value)
+        case "content-language":
+            httpHeaders.BlobContentLanguage = pString(value)
+        case "content-type":
+            httpHeaders.BlobContentType = pString(value)
+        }
+    }
+
+    blb := o.fs.getBlockBlobSVC(container, containerPath)
+    size := src.Size()
+    multipartUpload := size < 0 || size > o.fs.poolSize
+
+    fs.Debugf(nil, "o.meta = %+v", o.meta)
+    if multipartUpload {
+        err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
+    } else {
+        err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
     }
     if err != nil {
         return err
     }
 
     // Refresh metadata on object
-    if !ui.isDirMarker {
+    if !isDirMarker {
         o.clearMetaData()
         err = o.readMetaData(ctx)
         if err != nil {
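The Update path above converts the source's hex MD5 digest into raw bytes for the x-ms-blob-content-md5 header. A tiny self-contained illustration of that conversion (the digest value is just the well-known MD5 of "The quick brown fox jumps over the lazy dog"):

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// hex digest as returned by src.Hash(ctx, hash.MD5) in the code above
	sourceMD5 := "9e107d9d372bb6826bd81d3542a419d6"
	raw, err := hex.DecodeString(sourceMD5)
	if err != nil {
		panic(err)
	}
	// the header itself is sent base64 encoded on the wire
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}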
@@ -2383,10 +2314,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
     blb := o.getBlobSVC()
-    opt := blob.DeleteOptions{}
-    if o.fs.opt.DeleteSnapshots != "" {
-        action := blob.DeleteSnapshotsOptionType(o.fs.opt.DeleteSnapshots)
-        opt.DeleteSnapshots = &action
+    //only := blob.DeleteSnapshotsOptionTypeOnly
+    opt := blob.DeleteOptions{
+        //DeleteSnapshots: &only,
     }
     return o.fs.pacer.Call(func() (bool, error) {
         _, err := blb.Delete(ctx, &opt)
@@ -2453,14 +2383,13 @@ func parseTier(tier string) *blob.AccessTier {
 
 // Check the interfaces are satisfied
 var (
     _ fs.Fs          = &Fs{}
     _ fs.Copier      = &Fs{}
     _ fs.PutStreamer = &Fs{}
     _ fs.Purger      = &Fs{}
     _ fs.ListRer     = &Fs{}
-    _ fs.OpenChunkWriter = &Fs{}
     _ fs.Object      = &Object{}
     _ fs.MimeTyper   = &Object{}
     _ fs.GetTierer   = &Object{}
     _ fs.SetTierer   = &Object{}
 )
@@ -1,4 +1,5 @@
 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 
 package azureblob
 
@@ -16,3 +17,20 @@ func (f *Fs) InternalTest(t *testing.T) {
     enabled = f.Features().GetTier
     assert.True(t, enabled)
 }
+
+func TestIncrement(t *testing.T) {
+    for _, test := range []struct {
+        in   []byte
+        want []byte
+    }{
+        {[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+        {[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+        {[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+        {[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+        {[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+        {[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+    } {
+        increment(test.in)
+        assert.Equal(t, test.want, test.in)
+    }
+}
@@ -1,6 +1,7 @@
 // Test AzureBlob filesystem interface
 
 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 
 package azureblob
 
@@ -18,7 +19,7 @@ func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName:  "TestAzureBlob:",
         NilObject:   (*Object)(nil),
-        TiersToTest: []string{"Hot", "Cool", "Cold"},
+        TiersToTest: []string{"Hot", "Cool"},
         ChunkedUpload: fstests.ChunkedUploadConfig{
             MinChunkSize: defaultChunkSize,
         },
@@ -30,11 +31,11 @@ func TestIntegration2(t *testing.T) {
     if *fstest.RemoteName != "" {
         t.Skip("Skipping as -remote set")
     }
-    name := "TestAzureBlob"
+    name := "TestAzureBlob:"
     fstests.Run(t, &fstests.Opt{
-        RemoteName:  name + ":",
+        RemoteName:  name,
         NilObject:   (*Object)(nil),
-        TiersToTest: []string{"Hot", "Cool", "Cold"},
+        TiersToTest: []string{"Hot", "Cool"},
         ChunkedUpload: fstests.ChunkedUploadConfig{
             MinChunkSize: defaultChunkSize,
         },
@@ -61,7 +62,6 @@ func TestValidateAccessTier(t *testing.T) {
     "HOT":     {"HOT", true},
     "Hot":     {"Hot", true},
     "cool":    {"cool", true},
-    "cold":    {"cold", true},
     "archive": {"archive", true},
     "empty":   {"", false},
     "unknown": {"unknown", false},
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "
 
 //go:build plan9 || solaris || js
+// +build plan9 solaris js
 
-// Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob
(File diff suppressed because it is too large)
@@ -1,69 +0,0 @@
-//go:build !plan9 && !js
-
-package azurefiles
-
-import (
-    "context"
-    "math/rand"
-    "strings"
-    "testing"
-
-    "github.com/rclone/rclone/fstest/fstests"
-    "github.com/stretchr/testify/assert"
-)
-
-func (f *Fs) InternalTest(t *testing.T) {
-    t.Run("Authentication", f.InternalTestAuth)
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
-
-func (f *Fs) InternalTestAuth(t *testing.T) {
-    t.Skip("skipping since this requires authentication credentials which are not part of repo")
-    shareName := "test-rclone-oct-2023"
-    testCases := []struct {
-        name    string
-        options *Options
-    }{
-        {
-            name: "ConnectionString",
-            options: &Options{
-                ShareName:        shareName,
-                ConnectionString: "",
-            },
-        },
-        {
-            name: "AccountAndKey",
-            options: &Options{
-                ShareName: shareName,
-                Account:   "",
-                Key:       "",
-            }},
-        {
-            name: "SASUrl",
-            options: &Options{
-                ShareName: shareName,
-                SASURL:    "",
-            }},
-    }
-
-    for _, tc := range testCases {
-        t.Run(tc.name, func(t *testing.T) {
-            fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
-            assert.NoError(t, err)
-            dirName := randomString(10)
-            assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
-        })
-    }
-}
-
-const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
-
-func randomString(charCount int) string {
-    strBldr := strings.Builder{}
-    for i := 0; i < charCount; i++ {
-        randPos := rand.Int63n(52)
-        strBldr.WriteByte(chars[randPos])
-    }
-    return strBldr.String()
-}
@@ -1,17 +0,0 @@
-//go:build !plan9 && !js
-
-package azurefiles
-
-import (
-    "testing"
-
-    "github.com/rclone/rclone/fstest/fstests"
-)
-
-func TestIntegration(t *testing.T) {
-    var objPtr *Object
-    fstests.Run(t, &fstests.Opt{
-        RemoteName: "TestAzureFiles:",
-        NilObject:  objPtr,
-    })
-}
@@ -1,7 +0,0 @@
-// Build for azurefiles for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-//go:build plan9 || js
-
-// Package azurefiles provides an interface to Microsoft Azure Files
-package azurefiles
@@ -33,18 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
 
 // Bucket describes a B2 bucket
 type Bucket struct {
     ID        string `json:"bucketId"`
     AccountID string `json:"accountId"`
     Name      string `json:"bucketName"`
     Type      string `json:"bucketType"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
-
-// LifecycleRule is a single lifecycle rule
-type LifecycleRule struct {
-    DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
-    DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-    FileNamePrefix            string `json:"fileNamePrefix"`
 }
 
 // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -214,10 +206,9 @@ type FileInfo struct {
 
 // CreateBucketRequest is used to create a bucket
 type CreateBucketRequest struct {
     AccountID string `json:"accountId"`
     Name      string `json:"bucketName"`
     Type      string `json:"bucketType"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
 }
 
 // DeleteBucketRequest is used to create a bucket
@@ -340,11 +331,3 @@ type CopyPartRequest struct {
     PartNumber int64  `json:"partNumber"`      // Which part this is (starting from 1)
     Range      string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
 }
-
-// UpdateBucketRequest describes a request to modify a B2 bucket
-type UpdateBucketRequest struct {
-    ID             string          `json:"bucketId"`
-    AccountID      string          `json:"accountId"`
-    Type           string          `json:"bucketType,omitempty"`
-    LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
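For reference, the LifecycleRules field removed in this hunk is what carries a bucket retention policy on creation. A hypothetical request literal using the types shown above might look like the following; the field values are made up for illustration, and the import path assumes the rclone source tree.

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/b2/api"
)

func main() {
	// Hypothetical example: keep deleted/overwritten file versions for
	// 30 days across the whole bucket (daysFromHidingToDeleting).
	days := 30
	req := api.CreateBucketRequest{
		AccountID: "ACCOUNT_ID",
		Name:      "my-bucket",
		Type:      "allPrivate",
		LifecycleRules: []api.LifecycleRule{{
			DaysFromHidingToDeleting: &days,
			FileNamePrefix:           "",
		}},
	}
	fmt.Printf("%+v\n", req)
}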
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }
 
 func TestTimestampEqual(t *testing.T) {
-    assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+    assert.False(t, emptyT.Equal(emptyT))
     assert.False(t, t0.Equal(emptyT))
     assert.False(t, emptyT.Equal(t0))
     assert.False(t, t0.Equal(t1))
     assert.False(t, t1.Equal(t0))
-    assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
-    assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+    assert.True(t, t0.Equal(t0))
+    assert.True(t, t1.Equal(t1))
 }
backend/b2/b2.go (555 changed lines)
@@ -9,7 +9,6 @@ import (
     "bytes"
     "context"
     "crypto/sha1"
-    "encoding/json"
     "errors"
     "fmt"
     gohash "hash"
@@ -33,7 +32,6 @@ import (
     "github.com/rclone/rclone/fs/walk"
     "github.com/rclone/rclone/lib/bucket"
     "github.com/rclone/rclone/lib/encoder"
-    "github.com/rclone/rclone/lib/multipart"
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/pool"
     "github.com/rclone/rclone/lib/rest"
@@ -59,8 +57,9 @@ const (
     minChunkSize        = 5 * fs.Mebi
     defaultChunkSize    = 96 * fs.Mebi
     defaultUploadCutoff = 200 * fs.Mebi
     largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
-    defaultMaxAge       = 24 * time.Hour
+    memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
+    memoryPoolUseMmap   = false
 )
 
 // Globals
@@ -75,7 +74,6 @@ func init() {
     Name:        "b2",
     Description: "Backblaze B2",
     NewFs:       NewFs,
-    CommandHelp: commandHelp,
     Options: []fs.Option{{
         Name: "account",
         Help: "Account ID or Application Key ID.",
@@ -102,7 +100,7 @@ below will cause b2 to return specific errors:
 * "force_cap_exceeded"
 
 These will be set in the "X-Bz-Test-Mode" header which is documented
-in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
+in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
         Default:  "",
         Hide:     fs.OptionHideConfigurator,
         Advanced: true,
@@ -151,18 +149,6 @@ might a maximum of "--transfers" chunks in progress at once.
 5,000,000 Bytes is the minimum size.`,
         Default:  defaultChunkSize,
         Advanced: true,
-    }, {
-        Name: "upload_concurrency",
-        Help: `Concurrency for multipart uploads.
-
-This is the number of chunks of the same file that are uploaded
-concurrently.
-
-Note that chunks are stored in memory and there may be up to
-"--transfers" * "--b2-upload-concurrency" chunks stored at once
-in memory.`,
-        Default:  4,
-        Advanced: true,
     }, {
         Name: "disable_checksum",
         Help: `Disable checksums for large (> upload cutoff) files.
@@ -194,57 +180,29 @@ Example:
         Advanced: true,
     }, {
         Name: "download_auth_duration",
-        Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+        Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
 
-This is used in combination with "rclone link" for making files
-accessible to the public and sets the duration before the download
-authorization token will expire.
-
+The duration before the download authorization token will expire.
 The minimum value is 1 second. The maximum value is one week.`,
         Default:  fs.Duration(7 * 24 * time.Hour),
         Advanced: true,
     }, {
         Name:     "memory_pool_flush_time",
-        Default:  fs.Duration(time.Minute),
+        Default:  memoryPoolFlushTime,
         Advanced: true,
-        Hide:     fs.OptionHideBoth,
-        Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
+        Help: `How often internal memory buffer pools will be flushed.
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
     }, {
         Name:     "memory_pool_use_mmap",
-        Default:  false,
+        Default:  memoryPoolUseMmap,
         Advanced: true,
-        Hide:     fs.OptionHideBoth,
-        Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
-    }, {
-        Name: "lifecycle",
-        Help: `Set the number of days deleted files should be kept when creating a bucket.
-
-On bucket creation, this parameter is used to create a lifecycle rule
-for the entire bucket.
-
-If lifecycle is 0 (the default) it does not create a lifecycle rule so
-the default B2 behaviour applies. This is to create versions of files
-on delete and overwrite and to keep them indefinitely.
-
-If lifecycle is >0 then it creates a single rule setting the number of
-days before a file that is deleted or overwritten is deleted
-permanently. This is known as daysFromHidingToDeleting in the b2 docs.
-
-The minimum value for this parameter is 1 day.
-
-You can also enable hard_delete in the config also which will mean
-deletions won't cause versions but overwrites will still cause
-versions to be made.
-
-See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
-`,
-        Default:  0,
-        Advanced: true,
+        Help:     `Whether to use mmap buffers in internal memory pool.`,
     }, {
         Name:     config.ConfigEncoding,
         Help:     config.ConfigEncodingHelp,
         Advanced: true,
-        // See: https://www.backblaze.com/docs/cloud-storage-files
+        // See: https://www.backblaze.com/b2/docs/files.html
        // Encode invalid UTF-8 bytes as json doesn't handle them properly.
         // FIXME: allow /, but not leading, trailing or double
         Default: (encoder.Display |
@@ -266,11 +224,11 @@ type Options struct {
     UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
     CopyCutoff   fs.SizeSuffix `config:"copy_cutoff"`
     ChunkSize    fs.SizeSuffix `config:"chunk_size"`
-    UploadConcurrency int `config:"upload_concurrency"`
     DisableCheckSum bool   `config:"disable_checksum"`
     DownloadURL     string `config:"download_url"`
     DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
-    Lifecycle int `config:"lifecycle"`
+    MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
+    MemoryPoolUseMmap   bool        `config:"memory_pool_use_mmap"`
     Enc encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -295,18 +253,18 @@ type Fs struct {
     authMu      sync.Mutex            // lock for authorizing the account
     pacer       *fs.Pacer             // To pace and retry the API calls
     uploadToken *pacer.TokenDispenser // control concurrency
+    pool        *pool.Pool            // memory pool
 }
 
 // Object describes a b2 object
 type Object struct {
     fs       *Fs       // what this object is part of
     remote   string    // The remote path
     id       string    // b2 id of the file
     modTime  time.Time // The modified time of the object if known
     sha1     string    // SHA-1 hash if known
     size     int64     // Size of the object
     mimeType string    // Content-Type of the object
-    meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }
 
 // ------------------------------------------------------------
@@ -364,7 +322,7 @@ var retryErrorCodes = []int{
     504, // Gateway Time-out
 }
 
-// shouldRetryNoReauth returns a boolean as to whether this resp and err
+// shouldRetryNoAuth returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
     if fserrors.ContextError(ctx, &err) {
@@ -405,18 +363,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
 
 // errorHandler parses a non 2xx error response into an error
 func errorHandler(resp *http.Response) error {
-    body, err := rest.ReadBody(resp)
-    if err != nil {
-        fs.Errorf(nil, "Couldn't read error out of body: %v", err)
-        body = nil
-    }
-    // Decode error response if there was one - they can be blank
+    // Decode error response
     errResponse := new(api.Error)
-    if len(body) > 0 {
-        err = json.Unmarshal(body, errResponse)
-        if err != nil {
-            fs.Errorf(nil, "Couldn't decode error response: %v", err)
-        }
+    err := rest.DecodeJSON(resp, &errResponse)
+    if err != nil {
+        fs.Debugf(nil, "Couldn't decode error response: %v", err)
     }
     if errResponse.Code == "" {
         errResponse.Code = "unknown"
@@ -460,14 +411,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
     return
 }
 
-func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
-    err = checkUploadChunkSize(cs)
-    if err == nil {
-        old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
-    }
-    return
-}
-
 // setRoot changes the root of the Fs
 func (f *Fs) setRoot(root string) {
     f.root = parsePath(root)
@@ -515,14 +458,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         uploads:     make(map[string][]*api.GetUploadURLResponse),
         pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
         uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+        pool: pool.New(
+            time.Duration(opt.MemoryPoolFlushTime),
+            int(opt.ChunkSize),
+            ci.Transfers,
+            opt.MemoryPoolUseMmap,
+        ),
     }
     f.setRoot(root)
     f.features = (&fs.Features{
         ReadMimeType:      true,
         WriteMimeType:     true,
         BucketBased:       true,
         BucketBasedRootOK: true,
-        ChunkWriterDoesntSeek: true,
     }).Fill(ctx, f)
     // Set the test flag if required
     if opt.TestMode != "" {
@@ -649,24 +597,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
     f.uploadMu.Unlock()
 }
 
-// getRW gets a RW buffer and an upload token
+// getBuf gets a buffer of f.opt.ChunkSize and an upload token
 //
 // If noBuf is set then it just gets an upload token
-func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
+func (f *Fs) getBuf(noBuf bool) (buf []byte) {
     f.uploadToken.Get()
     if !noBuf {
-        rw = multipart.NewRW()
+        buf = f.pool.Get()
     }
-    return rw
+    return buf
 }
 
-// putRW returns a RW buffer to the memory pool and returns an upload
-// token
+// putBuf returns a buffer to the memory pool and an upload token
 //
-// If buf is nil then it just returns the upload token
-func (f *Fs) putRW(rw *pool.RW) {
-    if rw != nil {
-        _ = rw.Close()
+// If noBuf is set then it just returns the upload token
+func (f *Fs) putBuf(buf []byte, noBuf bool) {
+    if !noBuf {
+        f.pool.Put(buf)
     }
     f.uploadToken.Put()
 }
@@ -873,7 +820,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 
 // listBuckets returns all the buckets to out
 func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
-    err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
+    err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
         d := fs.NewDir(bucket.Name, time.Time{})
         entries = append(entries, d)
         return nil
@@ -966,14 +913,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 type listBucketFn func(*api.Bucket) error
 
 // listBucketsToFn lists the buckets to the function supplied
-func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
+func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
     var account = api.ListBucketsRequest{
         AccountID: f.info.AccountID,
         BucketID:  f.info.Allowed.BucketID,
|
||||||
}
|
}
|
||||||
if bucketName != "" && account.BucketID == "" {
|
|
||||||
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
|
||||||
}
|
|
||||||
|
|
||||||
var response api.ListBucketsResponse
|
var response api.ListBucketsResponse
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
|
@ -1019,7 +963,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
|
||||||
if bucketType != "" {
|
if bucketType != "" {
|
||||||
return bucketType, nil
|
return bucketType, nil
|
||||||
}
|
}
|
||||||
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
||||||
// listBucketsToFn reads bucket Types
|
// listBucketsToFn reads bucket Types
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
@ -1054,7 +998,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
|
||||||
if bucketID != "" {
|
if bucketID != "" {
|
||||||
return bucketID, nil
|
return bucketID, nil
|
||||||
}
|
}
|
||||||
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
|
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
|
||||||
// listBucketsToFn sets IDs
|
// listBucketsToFn sets IDs
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
@ -1118,11 +1062,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||||
Name: f.opt.Enc.FromStandardName(bucket),
|
Name: f.opt.Enc.FromStandardName(bucket),
|
||||||
Type: "allPrivate",
|
Type: "allPrivate",
|
||||||
}
|
}
|
||||||
if f.opt.Lifecycle > 0 {
|
|
||||||
request.LifecycleRules = []api.LifecycleRule{{
|
|
||||||
DaysFromHidingToDeleting: &f.opt.Lifecycle,
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
var response api.Bucket
|
var response api.Bucket
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
err := f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
|
@ -1250,7 +1189,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
|
||||||
// if oldOnly is true then it deletes only non current files.
|
// if oldOnly is true then it deletes only non current files.
|
||||||
//
|
//
|
||||||
// Implemented here so we can make sure we delete old versions.
|
// Implemented here so we can make sure we delete old versions.
|
||||||
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
|
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||||
bucket, directory := f.split(dir)
|
bucket, directory := f.split(dir)
|
||||||
if bucket == "" {
|
if bucket == "" {
|
||||||
return errors.New("can't purge from root")
|
return errors.New("can't purge from root")
|
||||||
|
@ -1268,7 +1207,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
|
||||||
return time.Since(time.Time(timestamp)) > maxAge
|
return time.Since(time.Time(timestamp)).Hours() > 24
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete Config.Transfers in parallel
|
// Delete Config.Transfers in parallel
|
||||||
|
@ -1291,21 +1230,6 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if oldOnly {
|
|
||||||
if deleteHidden && deleteUnfinished {
|
|
||||||
fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
|
|
||||||
} else if deleteHidden {
|
|
||||||
fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
|
|
||||||
} else if deleteUnfinished {
|
|
||||||
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
|
|
||||||
} else {
|
|
||||||
fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fs.Infof(f, "cleaning bucket %q of all files", bucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
last := ""
|
last := ""
|
||||||
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
|
||||||
if !isDirectory {
|
if !isDirectory {
|
||||||
|
@ -1316,14 +1240,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||||
if oldOnly && last != remote {
|
if oldOnly && last != remote {
|
||||||
// Check current version of the file
|
// Check current version of the file
|
||||||
if deleteHidden && object.Action == "hide" {
|
if object.Action == "hide" {
|
||||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||||
toBeDeleted <- object
|
toBeDeleted <- object
|
||||||
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
|
||||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
|
||||||
toBeDeleted <- object
|
toBeDeleted <- object
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
|
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fs.Debugf(remote, "Deleting (id %q)", object.ID)
|
fs.Debugf(remote, "Deleting (id %q)", object.ID)
|
||||||
|
@ -1345,17 +1269,12 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
|
||||||
|
|
||||||
// Purge deletes all the files and directories including the old versions.
|
// Purge deletes all the files and directories including the old versions.
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
return f.purge(ctx, dir, false, false, false, defaultMaxAge)
|
return f.purge(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
|
// CleanUp deletes all the hidden files.
|
||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
return f.purge(ctx, "", true, true, true, defaultMaxAge)
|
return f.purge(ctx, "", true)
|
||||||
}
|
|
||||||
|
|
||||||
// cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
|
|
||||||
func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
|
|
||||||
return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy does a server-side copy from dstObj <- srcObj
|
// copy does a server-side copy from dstObj <- srcObj
|
||||||
|
@ -1363,7 +1282,7 @@ func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bo
|
||||||
// If newInfo is nil then the metadata will be copied otherwise it
|
// If newInfo is nil then the metadata will be copied otherwise it
|
||||||
// will be replaced with newInfo
|
// will be replaced with newInfo
|
||||||
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
|
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
|
||||||
if srcObj.size > int64(f.opt.CopyCutoff) {
|
if srcObj.size >= int64(f.opt.CopyCutoff) {
|
||||||
if newInfo == nil {
|
if newInfo == nil {
|
||||||
newInfo, err = srcObj.getMetaData(ctx)
|
newInfo, err = srcObj.getMetaData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1374,11 +1293,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = up.Copy(ctx)
|
return up.Upload(ctx)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return dstObj.decodeMetaDataFileInfo(up.info)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dstBucket, dstPath := dstObj.split()
|
dstBucket, dstPath := dstObj.split()
|
||||||
|
@ -1507,7 +1422,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
absPath := "/" + urlEncode(bucketPath)
|
absPath := "/" + bucketPath
|
||||||
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
||||||
bucketType, err := f.getbucketType(ctx, bucket)
|
bucketType, err := f.getbucketType(ctx, bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1567,7 +1482,7 @@ func (o *Object) Size() int64 {
|
||||||
//
|
//
|
||||||
// Make sure it is lower case.
|
// Make sure it is lower case.
|
||||||
//
|
//
|
||||||
// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
|
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
|
||||||
// Some tools (e.g. Cyberduck) use this
|
// Some tools (e.g. Cyberduck) use this
|
||||||
func cleanSHA1(sha1 string) string {
|
func cleanSHA1(sha1 string) string {
|
||||||
const unverified = "unverified:"
|
const unverified = "unverified:"
|
||||||
|
@ -1594,14 +1509,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
|
||||||
o.size = Size
|
o.size = Size
|
||||||
// Use the UploadTimestamp if can't get file info
|
// Use the UploadTimestamp if can't get file info
|
||||||
o.modTime = time.Time(UploadTimestamp)
|
o.modTime = time.Time(UploadTimestamp)
|
||||||
err = o.parseTimeString(Info[timeKey])
|
return o.parseTimeString(Info[timeKey])
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// For now, just set "mtime" in metadata
|
|
||||||
o.meta = make(map[string]string, 1)
|
|
||||||
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// decodeMetaData sets the metadata in the object from an api.File
|
// decodeMetaData sets the metadata in the object from an api.File
|
||||||
|
@ -1703,16 +1611,6 @@ func timeString(modTime time.Time) string {
|
||||||
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
|
return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseTimeStringHelper converts a decimal string number of milliseconds
|
|
||||||
// elapsed since January 1, 1970 UTC into a time.Time
|
|
||||||
func parseTimeStringHelper(timeString string) (time.Time, error) {
|
|
||||||
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTimeString converts a decimal string number of milliseconds
|
// parseTimeString converts a decimal string number of milliseconds
|
||||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||||
// the modTime variable.
|
// the modTime variable.
|
||||||
|
@ -1720,12 +1618,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
|
||||||
if timeString == "" {
|
if timeString == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
modTime, err := parseTimeStringHelper(timeString)
|
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
|
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
o.modTime = modTime
|
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1802,14 +1700,14 @@ func (file *openFile) Close() (err error) {
|
||||||
|
|
||||||
// Check to see we read the correct number of bytes
|
// Check to see we read the correct number of bytes
|
||||||
if file.o.Size() != file.bytes {
|
if file.o.Size() != file.bytes {
|
||||||
return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
|
return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check the SHA1
|
// Check the SHA1
|
||||||
receivedSHA1 := file.o.sha1
|
receivedSHA1 := file.o.sha1
|
||||||
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
||||||
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
||||||
return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
|
return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -1879,14 +1777,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||||
ContentType: resp.Header.Get("Content-Type"),
|
ContentType: resp.Header.Get("Content-Type"),
|
||||||
Info: Info,
|
Info: Info,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Embryonic metadata support - just mtime
|
|
||||||
o.meta = make(map[string]string, 1)
|
|
||||||
modTime, err := parseTimeStringHelper(info.Info[timeKey])
|
|
||||||
if err == nil {
|
|
||||||
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
|
|
||||||
}
|
|
||||||
|
|
||||||
// When reading files from B2 via cloudflare using
|
// When reading files from B2 via cloudflare using
|
||||||
// --b2-download-url cloudflare strips the Content-Length
|
// --b2-download-url cloudflare strips the Content-Length
|
||||||
// headers (presumably so it can inject stuff) so use the old
|
// headers (presumably so it can inject stuff) so use the old
|
||||||
|
@ -1971,11 +1861,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if size < 0 {
|
if size == -1 {
|
||||||
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
||||||
rw := o.fs.getRW(false)
|
buf := o.fs.getBuf(false)
|
||||||
|
|
||||||
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
|
n, err := io.ReadFull(in, buf)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
bufReader := bufio.NewReader(in)
|
bufReader := bufio.NewReader(in)
|
||||||
in = bufReader
|
in = bufReader
|
||||||
|
@ -1984,42 +1874,31 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
|
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
o.fs.putRW(rw)
|
o.fs.putBuf(buf, false)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// NB Stream returns the buffer and token
|
// NB Stream returns the buffer and token
|
||||||
err = up.Stream(ctx, rw)
|
return up.Stream(ctx, buf)
|
||||||
if err != nil {
|
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
return err
|
|
||||||
}
|
|
||||||
return o.decodeMetaDataFileInfo(up.info)
|
|
||||||
} else if err == io.EOF {
|
|
||||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||||
defer o.fs.putRW(rw)
|
defer o.fs.putBuf(buf, false)
|
||||||
size = n
|
size = int64(n)
|
||||||
in = rw
|
in = bytes.NewReader(buf[:n])
|
||||||
} else {
|
} else {
|
||||||
o.fs.putRW(rw)
|
o.fs.putBuf(buf, false)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||||
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||||
Open: o.fs,
|
|
||||||
OpenOptions: options,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
up := chunkWriter.(*largeUpload)
|
return up.Upload(ctx)
|
||||||
return o.decodeMetaDataFileInfo(up.info)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
modTime, err := o.getModTime(ctx, src, options)
|
modTime := src.ModTime(ctx)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
|
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
|
||||||
if calculatedSha1 == "" {
|
if calculatedSha1 == "" {
|
||||||
|
@ -2124,71 +2003,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||||
return o.decodeMetaDataFileInfo(&response)
|
return o.decodeMetaDataFileInfo(&response)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
|
|
||||||
// When metadata support is added to b2, this method will need a more generic name
|
|
||||||
func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
|
|
||||||
modTime := src.ModTime(ctx)
|
|
||||||
|
|
||||||
// Fetch metadata if --metadata is in use
|
|
||||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
|
|
||||||
}
|
|
||||||
// merge metadata into request and user metadata
|
|
||||||
for k, v := range meta {
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
// For now, the only metadata we're concerned with is "mtime"
|
|
||||||
switch k {
|
|
||||||
case "mtime":
|
|
||||||
// mtime in meta overrides source ModTime
|
|
||||||
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
|
||||||
} else {
|
|
||||||
modTime = metaModTime
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Do nothing for now
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return modTime, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
|
||||||
//
|
|
||||||
// Pass in the remote and the src object
|
|
||||||
// You can also use options to hint at the desired chunk size
|
|
||||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
|
||||||
// FIXME what if file is smaller than 1 chunk?
|
|
||||||
if f.opt.Versions {
|
|
||||||
return info, nil, errNotWithVersions
|
|
||||||
}
|
|
||||||
if f.opt.VersionAt.IsSet() {
|
|
||||||
return info, nil, errNotWithVersionAt
|
|
||||||
}
|
|
||||||
//size := src.Size()
|
|
||||||
|
|
||||||
// Temporary Object under construction
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
}
|
|
||||||
|
|
||||||
bucket, _ := o.split()
|
|
||||||
err = f.makeBucket(ctx, bucket)
|
|
||||||
if err != nil {
|
|
||||||
return info, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info = fs.ChunkWriterInfo{
|
|
||||||
ChunkSize: int64(f.opt.ChunkSize),
|
|
||||||
Concurrency: o.fs.opt.UploadConcurrency,
|
|
||||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
|
||||||
}
|
|
||||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
|
||||||
return info, up, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
|
@ -2214,201 +2028,16 @@ func (o *Object) ID() string {
|
||||||
return o.id
|
return o.id
|
||||||
}
|
}
|
||||||
|
|
||||||
var lifecycleHelp = fs.CommandHelp{
|
|
||||||
Name: "lifecycle",
|
|
||||||
Short: "Read or set the lifecycle for a bucket",
|
|
||||||
Long: `This command can be used to read or set the lifecycle for a bucket.
|
|
||||||
|
|
||||||
Usage Examples:
|
|
||||||
|
|
||||||
To show the current lifecycle rules:
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket
|
|
||||||
|
|
||||||
This will dump something like this showing the lifecycle rules.
|
|
||||||
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"daysFromHidingToDeleting": 1,
|
|
||||||
"daysFromUploadingToHiding": null,
|
|
||||||
"fileNamePrefix": ""
|
|
||||||
}
|
|
||||||
]
|
|
||||||
|
|
||||||
If there are no lifecycle rules (the default) then it will just return [].
|
|
||||||
|
|
||||||
To reset the current lifecycle rules:
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
|
||||||
|
|
||||||
This will run and then print the new lifecycle rules as above.
|
|
||||||
|
|
||||||
Rclone only lets you set lifecycles for the whole bucket with the
|
|
||||||
fileNamePrefix = "".
|
|
||||||
|
|
||||||
You can't disable versioning with B2. The best you can do is to set
|
|
||||||
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
|
||||||
the config also which will mean deletions won't cause versions but
|
|
||||||
overwrites will still cause versions to be made.
|
|
||||||
|
|
||||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
|
||||||
|
|
||||||
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
|
||||||
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
|
|
||||||
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
var newRule api.LifecycleRule
|
|
||||||
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
|
|
||||||
days, err := strconv.Atoi(daysStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
|
|
||||||
}
|
|
||||||
newRule.DaysFromHidingToDeleting = &days
|
|
||||||
}
|
|
||||||
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
|
|
||||||
days, err := strconv.Atoi(daysStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
|
|
||||||
}
|
|
||||||
newRule.DaysFromUploadingToHiding = &days
|
|
||||||
}
|
|
||||||
bucketName, _ := f.split("")
|
|
||||||
if bucketName == "" {
|
|
||||||
return nil, errors.New("bucket required")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
var bucket *api.Bucket
|
|
||||||
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
|
|
||||||
bucketID, err := f.getBucketID(ctx, bucketName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_update_bucket",
|
|
||||||
}
|
|
||||||
var request = api.UpdateBucketRequest{
|
|
||||||
ID: bucketID,
|
|
||||||
AccountID: f.info.AccountID,
|
|
||||||
LifecycleRules: []api.LifecycleRule{newRule},
|
|
||||||
}
|
|
||||||
var response api.Bucket
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
|
||||||
return f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bucket = &response
|
|
||||||
} else {
|
|
||||||
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
|
|
||||||
bucket = b
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if bucket == nil {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
return bucket.LifecycleRules, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var cleanupHelp = fs.CommandHelp{
|
|
||||||
Name: "cleanup",
|
|
||||||
Short: "Remove unfinished large file uploads.",
|
|
||||||
Long: `This command removes unfinished large file uploads of age greater than
|
|
||||||
max-age, which defaults to 24 hours.
|
|
||||||
|
|
||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
|
||||||
it would do.
|
|
||||||
|
|
||||||
rclone backend cleanup b2:bucket/path/to/object
|
|
||||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
|
||||||
|
|
||||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
|
||||||
`,
|
|
||||||
Opts: map[string]string{
|
|
||||||
"max-age": "Max age of upload to delete",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
maxAge := defaultMaxAge
|
|
||||||
if opt["max-age"] != "" {
|
|
||||||
maxAge, err = fs.ParseDuration(opt["max-age"])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("bad max-age: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, f.cleanUp(ctx, false, true, maxAge)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cleanupHiddenHelp = fs.CommandHelp{
|
|
||||||
Name: "cleanup-hidden",
|
|
||||||
Short: "Remove old versions of files.",
|
|
||||||
Long: `This command removes any old hidden versions of files.
|
|
||||||
|
|
||||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
|
||||||
it would do.
|
|
||||||
|
|
||||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
|
||||||
`,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
return nil, f.cleanUp(ctx, true, false, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var commandHelp = []fs.CommandHelp{
|
|
||||||
lifecycleHelp,
|
|
||||||
cleanupHelp,
|
|
||||||
cleanupHiddenHelp,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Command the backend to run a named command
|
|
||||||
//
|
|
||||||
// The command run is name
|
|
||||||
// args may be used to read arguments from
|
|
||||||
// opts may be used to read optional arguments from
|
|
||||||
//
|
|
||||||
// The result should be capable of being JSON encoded
|
|
||||||
// If it is a string or a []string it will be shown to the user
|
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
|
||||||
switch name {
|
|
||||||
case "lifecycle":
|
|
||||||
return f.lifecycleCommand(ctx, name, arg, opt)
|
|
||||||
case "cleanup":
|
|
||||||
return f.cleanupCommand(ctx, name, arg, opt)
|
|
||||||
case "cleanup-hidden":
|
|
||||||
return f.cleanupHiddenCommand(ctx, name, arg, opt)
|
|
||||||
default:
|
|
||||||
return nil, fs.ErrorCommandNotFound
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = &Fs{}
|
_ fs.Fs = &Fs{}
|
||||||
_ fs.Purger = &Fs{}
|
_ fs.Purger = &Fs{}
|
||||||
_ fs.Copier = &Fs{}
|
_ fs.Copier = &Fs{}
|
||||||
_ fs.PutStreamer = &Fs{}
|
_ fs.PutStreamer = &Fs{}
|
||||||
_ fs.CleanUpper = &Fs{}
|
_ fs.CleanUpper = &Fs{}
|
||||||
_ fs.ListRer = &Fs{}
|
_ fs.ListRer = &Fs{}
|
||||||
_ fs.PublicLinker = &Fs{}
|
_ fs.PublicLinker = &Fs{}
|
||||||
_ fs.OpenChunkWriter = &Fs{}
|
_ fs.Object = &Object{}
|
||||||
_ fs.Commander = &Fs{}
|
_ fs.MimeTyper = &Object{}
|
||||||
_ fs.Object = &Object{}
|
_ fs.IDer = &Object{}
|
||||||
_ fs.MimeTyper = &Object{}
|
|
||||||
_ fs.IDer = &Object{}
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,29 +1,14 @@
|
||||||
package b2
|
package b2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"crypto/sha1"
|
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/b2/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/cache"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
|
||||||
"github.com/rclone/rclone/lib/random"
|
|
||||||
"github.com/rclone/rclone/lib/version"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test b2 string encoding
|
// Test b2 string encoding
|
||||||
// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
|
// https://www.backblaze.com/b2/docs/string_encoding.html
|
||||||
|
|
||||||
var encodeTest = []struct {
|
var encodeTest = []struct {
|
||||||
fullyEncoded string
|
fullyEncoded string
|
||||||
|
@ -183,304 +168,3 @@ func TestParseTimeString(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
|
|
||||||
func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
|
|
||||||
var headers = make(map[string]string)
|
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
headers[k[len(headerPrefix):]] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
|
|
||||||
what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
|
|
||||||
t.Run(what, func(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(size)
|
|
||||||
require.NoError(t, err)
|
|
||||||
original := random.String(int(ss))
|
|
||||||
|
|
||||||
contents := fstest.Gz(t, original)
|
|
||||||
mimeType := "text/html"
|
|
||||||
|
|
||||||
if chunkSize != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(chunkSize)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadChunkSize(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if uploadCutoff != "" {
|
|
||||||
ss := fs.SizeSuffix(0)
|
|
||||||
err := ss.Set(uploadCutoff)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = f.SetUploadCutoff(ss)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
|
||||||
btime := time.Now()
|
|
||||||
metadata := fs.Metadata{
|
|
||||||
// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
|
|
||||||
|
|
||||||
"mtime": "2009-05-06T04:05:06.499Z",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Need to specify HTTP options with the header prefix since they are passed as-is
|
|
||||||
options := []fs.OpenOption{
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
|
|
||||||
&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
|
|
||||||
defer func() {
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
}()
|
|
||||||
o := obj.(*Object)
|
|
||||||
gotMetadata, err := o.getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// X-Bz-Info-a & X-Bz-Info-b
|
|
||||||
optMetadata := OpenOptionToMetaData(options)
|
|
||||||
for k, v := range optMetadata {
|
|
||||||
got := gotMetadata.Info[k]
|
|
||||||
assert.Equal(t, v, got, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
// mtime
|
|
||||||
for k, v := range metadata {
|
|
||||||
got := o.meta[k]
|
|
||||||
assert.Equal(t, v, got, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
|
|
||||||
|
|
||||||
// Modification time from the x-bz-info-src_last_modified_millis header
|
|
||||||
var mtime api.Timestamp
|
|
||||||
err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
|
|
||||||
}
|
|
||||||
assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
|
|
||||||
|
|
||||||
// Upload time
|
|
||||||
gotBtime := time.Time(gotMetadata.UploadTimestamp)
|
|
||||||
dt := gotBtime.Sub(btime)
|
|
||||||
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
|
|
||||||
|
|
||||||
t.Run("GzipEncoding", func(t *testing.T) {
|
|
||||||
// Test that the gzipped file we uploaded can be
|
|
||||||
// downloaded
|
|
||||||
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
|
|
||||||
gotContents := fstests.ReadObject(ctx, t, o, -1)
|
|
||||||
assert.Equal(t, wantContents, gotContents)
|
|
||||||
assert.Equal(t, wantSize, o.Size())
|
|
||||||
gotHash, err := o.Hash(ctx, hash.SHA1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, wantHash, gotHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("NoDecompress", func(t *testing.T) {
|
|
||||||
checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestMetadata(t *testing.T) {
|
|
||||||
// 1 kB regular file
|
|
||||||
f.internalTestMetadata(t, "1kiB", "", "")
|
|
||||||
|
|
||||||
// 10 MiB large file
|
|
||||||
f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
|
|
||||||
}
|
|
||||||
|
|
||||||
func sha1Sum(t *testing.T, s string) string {
|
|
||||||
hash := sha1.Sum([]byte(s))
|
|
||||||
return fmt.Sprintf("%x", hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is adapted from the s3 equivalent.
|
|
||||||
func (f *Fs) InternalTestVersions(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS
|
|
||||||
// only seems to track them to 1 second granularity
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// Create an object
|
|
||||||
const dirName = "versions"
|
|
||||||
const fileName = dirName + "/" + "test-versions.txt"
|
|
||||||
contents := random.String(100)
|
|
||||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
|
||||||
defer func() {
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
}()
|
|
||||||
objMetadata, err := obj.(*Object).getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Small pause
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// Remove it
|
|
||||||
assert.NoError(t, obj.Remove(ctx))
|
|
||||||
|
|
||||||
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// And create it with different size and contents
|
|
||||||
newContents := random.String(101)
|
|
||||||
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
|
|
||||||
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
|
|
||||||
newObjMetadata, err := newObj.(*Object).getMetaData(ctx)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Run("Versions", func(t *testing.T) {
|
|
||||||
// Set --b2-versions for this test
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Read the contents
|
|
||||||
entries, err := f.List(ctx, dirName)
|
|
||||||
require.NoError(t, err)
|
|
||||||
tests := 0
|
|
||||||
var fileNameVersion string
|
|
||||||
for _, entry := range entries {
|
|
||||||
t.Log(entry)
|
|
||||||
remote := entry.Remote()
|
|
||||||
if remote == fileName {
|
|
||||||
t.Run("ReadCurrent", func(t *testing.T) {
|
|
||||||
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
tests++
|
|
||||||
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
|
|
||||||
t.Run("ReadVersion", func(t *testing.T) {
|
|
||||||
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
|
||||||
})
|
|
||||||
assert.WithinDuration(t, time.Time(objMetadata.UploadTimestamp), versionTime, time.Second, "object time must be with 1 second of version time")
|
|
||||||
fileNameVersion = remote
|
|
||||||
tests++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.Equal(t, 2, tests, "object missing from listing")
|
|
||||||
|
|
||||||
// Check we can read the object with a version suffix
|
|
||||||
t.Run("NewObject", func(t *testing.T) {
|
|
||||||
o, err := f.NewObject(ctx, fileNameVersion)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, o)
|
|
||||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
|
||||||
})
|
|
||||||
|
|
||||||
// Check we can make a NewFs from that object with a version suffix
|
|
||||||
t.Run("NewFs", func(t *testing.T) {
|
|
||||||
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
|
|
||||||
// Make sure --b2-versions is set in the config of the new remote
|
|
||||||
fs.Debugf(nil, "oldPath = %q", newPath)
|
|
||||||
lastColon := strings.LastIndex(newPath, ":")
|
|
||||||
require.True(t, lastColon >= 0)
|
|
||||||
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
|
|
||||||
fs.Debugf(nil, "newPath = %q", newPath)
|
|
||||||
fNew, err := cache.Get(ctx, newPath)
|
|
||||||
// This should return pointing to a file
|
|
||||||
require.Equal(t, fs.ErrorIsFile, err)
|
|
||||||
require.NotNil(t, fNew)
|
|
||||||
// With the directory above
|
|
||||||
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("VersionAt", func(t *testing.T) {
|
|
||||||
// We set --b2-version-at for this test so make sure we reset it at the end
|
|
||||||
defer func() {
|
|
||||||
f.opt.VersionAt = fs.Time{}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var (
|
|
||||||
firstObjectTime = time.Time(objMetadata.UploadTimestamp)
|
|
||||||
secondObjectTime = time.Time(newObjMetadata.UploadTimestamp)
|
|
||||||
)
|
|
||||||
|
|
||||||
for _, test := range []struct {
|
|
||||||
what string
|
|
||||||
at time.Time
|
|
||||||
want []fstest.Item
|
|
||||||
wantErr error
|
|
||||||
wantSize int64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
what: "Before",
|
|
||||||
at: firstObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterOne",
|
|
||||||
at: firstObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 100,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterDelete",
|
|
||||||
at: secondObjectTime.Add(-time.Second),
|
|
||||||
want: fstests.InternalTestFiles,
|
|
||||||
wantErr: fs.ErrorObjectNotFound,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
what: "AfterTwo",
|
|
||||||
at: secondObjectTime.Add(time.Second),
|
|
||||||
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
|
|
||||||
wantSize: 101,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(test.what, func(t *testing.T) {
|
|
||||||
f.opt.VersionAt = fs.Time(test.at)
|
|
||||||
t.Run("List", func(t *testing.T) {
|
|
||||||
fstest.CheckListing(t, f, test.want)
|
|
||||||
})
|
|
||||||
// b2 NewObject doesn't work with VersionAt
|
|
||||||
//t.Run("NewObject", func(t *testing.T) {
|
|
||||||
// gotObj, gotErr := f.NewObject(ctx, fileName)
|
|
||||||
// assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
// if gotErr == nil {
|
|
||||||
// assert.Equal(t, test.wantSize, gotObj.Size())
|
|
||||||
// }
|
|
||||||
//})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Cleanup", func(t *testing.T) {
|
|
||||||
require.NoError(t, f.cleanUp(ctx, true, false, 0))
|
|
||||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
|
||||||
fstest.CheckListing(t, f, items)
|
|
||||||
// Set --b2-versions for this test
|
|
||||||
f.opt.Versions = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.Versions = false
|
|
||||||
}()
|
|
||||||
fstest.CheckListing(t, f, items)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Purge gets tested later
|
|
||||||
}
|
|
||||||
|
|
||||||
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Metadata", f.InternalTestMetadata)
|
|
||||||
t.Run("Versions", f.InternalTestVersions)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
|
|
|
@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||||
return f.setUploadCutoff(cs)
|
return f.setUploadCutoff(cs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
|
||||||
return f.setCopyCutoff(cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
// Upload large files for b2
|
// Upload large files for b2
|
||||||
//
|
//
|
||||||
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
|
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||||
|
|
||||||
package b2
|
package b2
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
@ -13,6 +14,7 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/b2/api"
|
"github.com/rclone/rclone/backend/b2/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
|
@ -78,31 +80,36 @@ type largeUpload struct {
|
||||||
wrap accounting.WrapFn // account parts being transferred
|
wrap accounting.WrapFn // account parts being transferred
|
||||||
id string // ID of the file being uploaded
|
id string // ID of the file being uploaded
|
||||||
size int64 // total size
|
size int64 // total size
|
||||||
parts int // calculated number of parts, if known
|
parts int64 // calculated number of parts, if known
|
||||||
sha1smu sync.Mutex // mutex to protect sha1s
|
|
||||||
sha1s []string // slice of SHA1s for each part
|
sha1s []string // slice of SHA1s for each part
|
||||||
uploadMu sync.Mutex // lock for upload variable
|
uploadMu sync.Mutex // lock for upload variable
|
||||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||||
chunkSize int64 // chunk size to use
|
chunkSize int64 // chunk size to use
|
||||||
src *Object // if copying, object we are reading from
|
src *Object // if copying, object we are reading from
|
||||||
info *api.FileInfo // final response with info about the object
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||||
//
|
//
|
||||||
// If newInfo is set then metadata from that will be used instead of reading it from src
|
// If newInfo is set then metadata from that will be used instead of reading it from src
|
||||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
|
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
parts := 0
|
parts := int64(0)
|
||||||
|
sha1SliceSize := int64(maxParts)
|
||||||
chunkSize := defaultChunkSize
|
chunkSize := defaultChunkSize
|
||||||
if size == -1 {
|
if size == -1 {
|
||||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
||||||
} else {
|
} else {
|
||||||
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
|
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
|
||||||
parts = int(size / int64(chunkSize))
|
parts = size / int64(chunkSize)
|
||||||
if size%int64(chunkSize) != 0 {
|
if size%int64(chunkSize) != 0 {
|
||||||
parts++
|
parts++
|
||||||
}
|
}
|
||||||
|
sha1SliceSize = parts
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_start_large_file",
|
||||||
}
|
}
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
bucketID, err := f.getBucketID(ctx, bucket)
|
bucketID, err := f.getBucketID(ctx, bucket)
|
||||||
|
@ -113,27 +120,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||||
BucketID: bucketID,
|
BucketID: bucketID,
|
||||||
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
||||||
}
|
}
|
||||||
optionsToSend := make([]fs.OpenOption, 0, len(options))
|
|
||||||
if newInfo == nil {
|
if newInfo == nil {
|
||||||
modTime, err := o.getModTime(ctx, src, options)
|
modTime := src.ModTime(ctx)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
request.ContentType = fs.MimeType(ctx, src)
|
request.ContentType = fs.MimeType(ctx, src)
|
||||||
request.Info = map[string]string{
|
request.Info = map[string]string{
|
||||||
timeKey: timeString(modTime),
|
timeKey: timeString(modTime),
|
||||||
}
|
}
|
||||||
// Custom upload headers - remove header prefix since they are sent in the body
|
|
||||||
for _, option := range options {
|
|
||||||
k, v := option.Header()
|
|
||||||
k = strings.ToLower(k)
|
|
||||||
if strings.HasPrefix(k, headerPrefix) {
|
|
||||||
request.Info[k[len(headerPrefix):]] = v
|
|
||||||
} else {
|
|
||||||
optionsToSend = append(optionsToSend, option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Set the SHA1 if known
|
// Set the SHA1 if known
|
||||||
if !o.fs.opt.DisableCheckSum || doCopy {
|
if !o.fs.opt.DisableCheckSum || doCopy {
|
||||||
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||||
|
@ -144,11 +136,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||||
request.ContentType = newInfo.ContentType
|
request.ContentType = newInfo.ContentType
|
||||||
request.Info = newInfo.Info
|
request.Info = newInfo.Info
|
||||||
}
|
}
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_start_large_file",
|
|
||||||
Options: optionsToSend,
|
|
||||||
}
|
|
||||||
var response api.StartLargeFileResponse
|
var response api.StartLargeFileResponse
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||||
|
@ -165,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||||
id: response.ID,
|
id: response.ID,
|
||||||
size: size,
|
size: size,
|
||||||
parts: parts,
|
parts: parts,
|
||||||
sha1s: make([]string, 0, 16),
|
sha1s: make([]string, sha1SliceSize),
|
||||||
chunkSize: int64(chunkSize),
|
chunkSize: int64(chunkSize),
|
||||||
}
|
}
|
||||||
// unwrap the accounting from the input, we use wrap to put it
|
// unwrap the accounting from the input, we use wrap to put it
|
||||||
|
@ -184,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||||
// This should be returned with returnUploadURL when finished
|
// This should be returned with returnUploadURL when finished
|
||||||
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
||||||
up.uploadMu.Lock()
|
up.uploadMu.Lock()
|
||||||
if len(up.uploads) > 0 {
|
defer up.uploadMu.Unlock()
|
||||||
|
if len(up.uploads) == 0 {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_get_upload_part_url",
|
||||||
|
}
|
||||||
|
var request = api.GetUploadPartURLRequest{
|
||||||
|
ID: up.id,
|
||||||
|
}
|
||||||
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
||||||
|
return up.f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||||
up.uploadMu.Unlock()
|
|
||||||
return upload, nil
|
|
||||||
}
|
|
||||||
up.uploadMu.Unlock()
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_get_upload_part_url",
|
|
||||||
}
|
|
||||||
var request = api.GetUploadPartURLRequest{
|
|
||||||
ID: up.id,
|
|
||||||
}
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
|
||||||
return up.f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
|
||||||
}
|
}
|
||||||
return upload, nil
|
return upload, nil
|
||||||
}
|
}
|
||||||
|
@@ -218,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
     up.uploadMu.Unlock()
 }
 
-// Add an sha1 to the being built up sha1s
-func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
-    up.sha1smu.Lock()
-    defer up.sha1smu.Unlock()
-    if len(up.sha1s) < chunkNumber+1 {
-        up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
-    }
-    up.sha1s[chunkNumber] = sha1
-}
-
-// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
-func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
-    // Only account after the checksum reads have been done
-    if do, ok := reader.(pool.DelayAccountinger); ok {
-        // To figure out this number, do a transfer and if the accounted size is 0 or a
-        // multiple of what it should be, increase or decrease this number.
-        do.DelayAccounting(1)
-    }
-
-    err = up.f.pacer.Call(func() (bool, error) {
-        // Discover the size by seeking to the end
-        size, err = reader.Seek(0, io.SeekEnd)
-        if err != nil {
-            return false, err
-        }
-
-        // rewind the reader on retry and after reading size
-        _, err = reader.Seek(0, io.SeekStart)
-        if err != nil {
-            return false, err
-        }
-
-        fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
-
+// Transfer a chunk
+func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+    err := up.f.pacer.Call(func() (bool, error) {
+        fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
         // Get upload URL
         upload, err := up.getUploadURL(ctx)
@@ -258,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
             return false, err
         }
 
-        in := newHashAppendingReader(reader, sha1.New())
-        sizeWithHash := size + int64(in.AdditionalLength())
+        in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
+        size := int64(len(body)) + int64(in.AdditionalLength())
 
         // Authorization
         //
@@ -289,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
             Body: up.wrap(in),
             ExtraHeaders: map[string]string{
                 "Authorization":    upload.AuthorizationToken,
-                "X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
+                "X-Bz-Part-Number": fmt.Sprintf("%d", part),
                 sha1Header:         "hex_digits_at_end",
             },
-            ContentLength: &sizeWithHash,
+            ContentLength: &size,
         }
 
         var response api.UploadPartResponse
@@ -300,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
         resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
         retry, err := up.f.shouldRetry(ctx, resp, err)
         if err != nil {
-            fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
+            fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
         }
         // On retryable error clear PartUploadURL
         if retry {
@@ -308,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
             upload = nil
         }
         up.returnUploadURL(upload)
-        up.addSha1(chunkNumber, in.HexSum())
+        up.sha1s[part-1] = in.HexSum()
         return retry, err
     })
     if err != nil {
-        fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
+        fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
     } else {
-        fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
+        fs.Debugf(up.o, "Done sending chunk %d", part)
     }
-    return size, err
+    return err
 }
 
 // Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
+func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
     err := up.f.pacer.Call(func() (bool, error) {
         fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
         opts := rest.Opts{
             Method: "POST",
             Path:   "/b2_copy_part",
         }
-        offset := int64(part) * up.chunkSize // where we are in the source file
+        offset := (part - 1) * up.chunkSize // where we are in the source file
         var request = api.CopyPartRequest{
             SourceID:    up.src.id,
             LargeFileID: up.id,
-            PartNumber:  int64(part + 1),
+            PartNumber:  part,
             Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
         }
         var response api.UploadPartResponse
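The `up.sha1s[part-1] = ...` writes above rely on the sha1s slice being pre-sized in newLargeUpload, whereas the code being replaced grew the slice on demand behind a mutex in addSha1. A small self-contained sketch of the two collection styles, with all names invented for the example:

```go
// Two ways of collecting per-part SHA1s: grow-on-demand behind a mutex,
// or a pre-allocated slice where each part writes its own index.
package main

import (
	"fmt"
	"sync"
)

// appendStyle grows the slice on demand; it needs a mutex because several
// goroutines may add different part numbers at the same time.
type appendStyle struct {
	mu    sync.Mutex
	sha1s []string
}

func (a *appendStyle) add(part int, sum string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	if len(a.sha1s) < part+1 {
		a.sha1s = append(a.sha1s, make([]string, part+1-len(a.sha1s))...)
	}
	a.sha1s[part] = sum
}

func main() {
	// Pre-allocated style: one slot per 1-based part, so each goroutine
	// writes a distinct index and no lock is needed for the writes.
	const parts = 4
	sha1s := make([]string, parts)
	var wg sync.WaitGroup
	for part := 1; part <= parts; part++ {
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			sha1s[part-1] = fmt.Sprintf("sha1-of-part-%d", part)
		}(part)
	}
	wg.Wait()

	a := &appendStyle{}
	a.add(2, "sha1-of-part-3")
	fmt.Println(sha1s, a.sha1s)
}
```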
@@ -340,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
         if err != nil {
             fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
         }
-        up.addSha1(part, response.SHA1)
+        up.sha1s[part-1] = response.SHA1
         return retry, err
     })
     if err != nil {
@@ -351,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
     return err
 }
 
-// Close closes off the large upload
-func (up *largeUpload) Close(ctx context.Context) error {
+// finish closes off the large upload
+func (up *largeUpload) finish(ctx context.Context) error {
     fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
     opts := rest.Opts{
         Method: "POST",
@@ -370,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
     if err != nil {
         return err
     }
-    up.info = &response
-    return nil
+    return up.o.decodeMetaDataFileInfo(&response)
 }
 
-// Abort aborts the large upload
-func (up *largeUpload) Abort(ctx context.Context) error {
+// cancel aborts the large upload
+func (up *largeUpload) cancel(ctx context.Context) error {
     fs.Debugf(up.o, "Cancelling large file %s", up.what)
     opts := rest.Opts{
         Method: "POST",
@@ -400,105 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
 // reaches EOF.
 //
 // Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
-    defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+    defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
     fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
     var (
         g, gCtx      = errgroup.WithContext(ctx)
         hasMoreParts = true
     )
-    up.size = initialUploadBlock.Size()
-    up.parts = 0
-    for part := 0; hasMoreParts; part++ {
+    up.size = int64(len(initialUploadBlock))
+    g.Go(func() error {
+        for part := int64(1); hasMoreParts; part++ {
             // Get a block of memory from the pool and token which limits concurrency.
-            var rw *pool.RW
-            if part == 0 {
-                rw = initialUploadBlock
+            var buf []byte
+            if part == 1 {
+                buf = initialUploadBlock
             } else {
-                rw = up.f.getRW(false)
-            }
-
-            // Fail fast, in case an errgroup managed function returns an error
-            // gCtx is cancelled. There is no point in uploading all the other parts.
-            if gCtx.Err() != nil {
-                up.f.putRW(rw)
-                break
-            }
-
-            // Read the chunk
-            var n int64
-            if part == 0 {
-                n = rw.Size()
-            } else {
-                n, err = io.CopyN(rw, up.in, up.chunkSize)
-                if err == io.EOF {
-                    if n == 0 {
-                        fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
-                        up.f.putRW(rw)
-                        break
-                    } else {
-                        fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
-                    }
-                    hasMoreParts = false
-                } else if err != nil {
-                    // other kinds of errors indicate failure
-                    up.f.putRW(rw)
-                    return err
+                buf = up.f.getBuf(false)
             }
-            }
 
-            // Keep stats up to date
-            up.parts += 1
-            up.size += n
-            if part > maxParts {
-                up.f.putRW(rw)
-                return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-            }
+            // Fail fast, in case an errgroup managed function returns an error
+            // gCtx is cancelled. There is no point in uploading all the other parts.
+            if gCtx.Err() != nil {
+                up.f.putBuf(buf, false)
+                return nil
+            }
 
-            part := part // for the closure
-            g.Go(func() (err error) {
-                defer up.f.putRW(rw)
-                _, err = up.WriteChunk(gCtx, part, rw)
-                return err
-            })
-    }
+            // Read the chunk
+            var n int
+            if part == 1 {
+                n = len(buf)
+            } else {
+                n, err = io.ReadFull(up.in, buf)
+                if err == io.ErrUnexpectedEOF {
+                    fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+                    buf = buf[:n]
+                    hasMoreParts = false
+                } else if err == io.EOF {
+                    fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+                    up.f.putBuf(buf, false)
+                    return nil
+                } else if err != nil {
+                    // other kinds of errors indicate failure
+                    up.f.putBuf(buf, false)
+                    return err
+                }
+            }
+
+            // Keep stats up to date
+            up.parts = part
+            up.size += int64(n)
+            if part > maxParts {
+                up.f.putBuf(buf, false)
+                return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+            }
+
+            part := part // for the closure
+            g.Go(func() (err error) {
+                defer up.f.putBuf(buf, false)
+                return up.transferChunk(gCtx, part, buf)
+            })
+        }
+        return nil
+    })
     err = g.Wait()
     if err != nil {
         return err
     }
-    return up.Close(ctx)
+    up.sha1s = up.sha1s[:up.parts]
+    return up.finish(ctx)
 }
 
-// Copy the chunks from the source to the destination
-func (up *largeUpload) Copy(ctx context.Context) (err error) {
-    defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+// Upload uploads the chunks from the input
+func (up *largeUpload) Upload(ctx context.Context) (err error) {
+    defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
     fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
     var (
         g, gCtx   = errgroup.WithContext(ctx)
         remaining = up.size
+        uploadPool *pool.Pool
+        ci         = fs.GetConfig(ctx)
     )
-    g.SetLimit(up.f.opt.UploadConcurrency)
-    for part := 0; part < up.parts; part++ {
-        // Fail fast, in case an errgroup managed function returns an error
-        // gCtx is cancelled. There is no point in copying all the other parts.
-        if gCtx.Err() != nil {
-            break
-        }
-
-        reqSize := remaining
-        if reqSize >= up.chunkSize {
-            reqSize = up.chunkSize
-        }
-
-        part := part // for the closure
-        g.Go(func() (err error) {
-            return up.copyChunk(gCtx, part, reqSize)
-        })
-        remaining -= reqSize
+    // If using large chunk size then make a temporary pool
+    if up.chunkSize <= int64(up.f.opt.ChunkSize) {
+        uploadPool = up.f.pool
+    } else {
+        uploadPool = pool.New(
+            time.Duration(up.f.opt.MemoryPoolFlushTime),
+            int(up.chunkSize),
+            ci.Transfers,
+            up.f.opt.MemoryPoolUseMmap,
+        )
+        defer uploadPool.Flush()
     }
+    // Get an upload token and a buffer
+    getBuf := func() (buf []byte) {
+        up.f.getBuf(true)
+        if !up.doCopy {
+            buf = uploadPool.Get()
+        }
+        return buf
+    }
+    // Put an upload token and a buffer
+    putBuf := func(buf []byte) {
+        if !up.doCopy {
+            uploadPool.Put(buf)
+        }
+        up.f.putBuf(nil, true)
+    }
+    g.Go(func() error {
+        for part := int64(1); part <= up.parts; part++ {
+            // Get a block of memory from the pool and token which limits concurrency.
+            buf := getBuf()
+
+            // Fail fast, in case an errgroup managed function returns an error
+            // gCtx is cancelled. There is no point in uploading all the other parts.
+            if gCtx.Err() != nil {
+                putBuf(buf)
+                return nil
+            }
+
+            reqSize := remaining
+            if reqSize >= up.chunkSize {
+                reqSize = up.chunkSize
+            }
+
+            if !up.doCopy {
+                // Read the chunk
+                buf = buf[:reqSize]
+                _, err = io.ReadFull(up.in, buf)
+                if err != nil {
+                    putBuf(buf)
+                    return err
+                }
+            }
+
+            part := part // for the closure
+            g.Go(func() (err error) {
+                defer putBuf(buf)
+                if !up.doCopy {
+                    err = up.transferChunk(gCtx, part, buf)
+                } else {
+                    err = up.copyChunk(gCtx, part, reqSize)
+                }
+                return err
+            })
+            remaining -= reqSize
+        }
+        return nil
+    })
     err = g.Wait()
     if err != nil {
         return err
     }
-    return up.Close(ctx)
+    return up.finish(ctx)
 }

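Both variants of Stream above share the same overall shape: a producer reads the input a chunk at a time and hands each chunk to an errgroup goroutine for upload. A compact sketch of that shape, under the assumption that the real transfer call is replaced by a stand-in (uploadChunk and streamUpload below are written for this example, not the functions in the diff):

```go
// Producer/worker streaming upload: read fixed-size chunks, upload each
// chunk concurrently, stop on the first short read or EOF.
package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"golang.org/x/sync/errgroup"
)

// uploadChunk stands in for the per-part transfer call.
func uploadChunk(ctx context.Context, part int, data []byte) error {
	fmt.Printf("part %d: %d bytes\n", part, len(data))
	return nil
}

func streamUpload(ctx context.Context, in io.Reader, chunkSize int) error {
	g, gCtx := errgroup.WithContext(ctx)
	for part := 1; ; part++ {
		buf := make([]byte, chunkSize)
		n, err := io.ReadFull(in, buf)
		if err == io.EOF {
			break // nothing more to read; the previous chunk was the last
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			_ = g.Wait() // let already-started uploads finish before reporting
			return err
		}
		last := err == io.ErrUnexpectedEOF // a short read means this is the final chunk
		data := buf[:n]
		part := part // capture for the closure
		g.Go(func() error { return uploadChunk(gCtx, part, data) })
		if last {
			break
		}
	}
	return g.Wait()
}

func main() {
	err := streamUpload(context.Background(), strings.NewReader(strings.Repeat("x", 10)), 4)
	fmt.Println("err:", err)
}
```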
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
         out += ": " + e.Message
     }
     if e.ContextInfo != nil {
-        out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
+        out += fmt.Sprintf(" (%+v)", e.ContextInfo)
     }
     return out
 }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
 // ItemFields are the fields needed for FileInfo
 var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
 
-// Types of things in Item/ItemMini
+// Types of things in Item
 const (
     ItemTypeFolder = "folder"
     ItemTypeFile   = "file"
@@ -72,31 +72,20 @@ const (
     ItemStatusDeleted = "deleted"
 )
 
-// ItemMini is a subset of the elements in a full Item returned by some API calls
-type ItemMini struct {
-    Type       string `json:"type"`
-    ID         string `json:"id"`
-    SequenceID int64  `json:"sequence_id,string"`
-    Etag       string `json:"etag"`
-    SHA1       string `json:"sha1"`
-    Name       string `json:"name"`
-}
-
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
     Type              string  `json:"type"`
     ID                string  `json:"id"`
-    SequenceID        int64   `json:"sequence_id,string"`
+    SequenceID        string  `json:"sequence_id"`
     Etag              string  `json:"etag"`
     SHA1              string  `json:"sha1"`
     Name              string  `json:"name"`
     Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
     CreatedAt         Time    `json:"created_at"`
     ModifiedAt        Time    `json:"modified_at"`
     ContentCreatedAt  Time    `json:"content_created_at"`
     ContentModifiedAt Time    `json:"content_modified_at"`
     ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-    Parent            ItemMini `json:"parent"`
     SharedLink        struct {
         URL    string `json:"url,omitempty"`
         Access string `json:"access,omitempty"`
@@ -167,7 +156,19 @@ type PreUploadCheckResponse struct {
 // PreUploadCheckConflict is returned in the ContextInfo error field
 // from PreUploadCheck when the error code is "item_name_in_use"
 type PreUploadCheckConflict struct {
-    Conflicts ItemMini `json:"conflicts"`
+    Conflicts struct {
+        Type        string `json:"type"`
+        ID          string `json:"id"`
+        FileVersion struct {
+            Type string `json:"type"`
+            ID   string `json:"id"`
+            Sha1 string `json:"sha1"`
+        } `json:"file_version"`
+        SequenceID string `json:"sequence_id"`
+        Etag       string `json:"etag"`
+        Sha1       string `json:"sha1"`
+        Name       string `json:"name"`
+    } `json:"conflicts"`
 }
 
 // UpdateFileModTime is used in Update File Info
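PreUploadCheckConflict above is decoded from the `context_info` field of the API error when a name is already in use. A minimal sketch of that decode step, using an illustrative JSON payload (the struct below is trimmed to a few fields and is not the full API shape):

```go
// Decode a name-conflict payload from an error's context_info JSON.
package main

import (
	"encoding/json"
	"fmt"
)

type preUploadConflict struct {
	Conflicts struct {
		Type string `json:"type"`
		ID   string `json:"id"`
		Sha1 string `json:"sha1"`
		Name string `json:"name"`
	} `json:"conflicts"`
}

func main() {
	contextInfo := []byte(`{"conflicts":{"type":"file","id":"12345","sha1":"da39a3ee","name":"report.txt"}}`)
	var c preUploadConflict
	if err := json.Unmarshal(contextInfo, &c); err != nil {
		panic(err)
	}
	// A caller can then decide to update the existing file by ID
	// instead of creating a new one.
	fmt.Println(c.Conflicts.ID, c.Conflicts.Name)
}
```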
|
@@ -280,30 +281,3 @@ type User struct {
     Address   string `json:"address"`
     AvatarURL string `json:"avatar_url"`
 }
-
-// FileTreeChangeEventTypes are the events that can require cache invalidation
-var FileTreeChangeEventTypes = map[string]struct{}{
-    "ITEM_COPY":                 {},
-    "ITEM_CREATE":               {},
-    "ITEM_MAKE_CURRENT_VERSION": {},
-    "ITEM_MODIFY":               {},
-    "ITEM_MOVE":                 {},
-    "ITEM_RENAME":               {},
-    "ITEM_TRASH":                {},
-    "ITEM_UNDELETE_VIA_TRASH":   {},
-    "ITEM_UPLOAD":               {},
-}
-
-// Event is an array element in the response returned from /events
-type Event struct {
-    EventType string `json:"event_type"`
-    EventID   string `json:"event_id"`
-    Source    Item   `json:"source"`
-}
-
-// Events is returned from /events
-type Events struct {
-    ChunkSize          int64   `json:"chunk_size"`
-    Entries            []Event `json:"entries"`
-    NextStreamPosition int64   `json:"next_stream_position"`
-}

@@ -149,23 +149,6 @@ func init() {
             Default:  "",
             Help:     "Only show items owned by the login (email address) passed in.",
             Advanced: true,
-        }, {
-            Name:    "impersonate",
-            Default: "",
-            Help: `Impersonate this user ID when using a service account.
-
-Setting this flag allows rclone, when using a JWT service account, to
-act on behalf of another user by setting the as-user header.
-
-The user ID is the Box identifier for a user. User IDs can found for
-any user via the GET /users endpoint, which is only available to
-admins, or by calling the GET /users/me endpoint with an authenticated
-user session.
-
-See: https://developer.box.com/guides/authentication/jwt/as-user/
-`,
-            Advanced:  true,
-            Sensitive: true,
         }, {
             Name: config.ConfigEncoding,
             Help: config.ConfigEncodingHelp,
@@ -279,29 +262,19 @@ type Options struct {
     AccessToken string `config:"access_token"`
     ListChunk   int    `config:"list_chunk"`
     OwnedBy     string `config:"owned_by"`
-    Impersonate string `config:"impersonate"`
-}
-
-// ItemMeta defines metadata we cache for each Item ID
-type ItemMeta struct {
-    SequenceID int64  // the most recent event processed for this item
-    ParentID   string // ID of the parent directory of this item
-    Name       string // leaf name of this item
 }
 
 // Fs represents a remote box
 type Fs struct {
     name            string                // name of this remote
     root            string                // the path we are working on
     opt             Options               // parsed options
     features        *fs.Features          // optional features
     srv             *rest.Client          // the connection to the server
     dirCache        *dircache.DirCache    // Map of directory path to directory id
     pacer           *fs.Pacer             // pacer for API calls
     tokenRenewer    *oauthutil.Renew      // renew the token on expiry
     uploadToken     *pacer.TokenDispenser // control concurrency
-    itemMetaCacheMu *sync.Mutex           // protects itemMetaCache
-    itemMetaCache   map[string]ItemMeta   // map of Item ID to selected metadata
 }
 
 // Object describes a box object
@@ -380,7 +353,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 
 // readMetaDataForPath reads the metadata from the path
 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
-    // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
+    // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
     leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
     if err != nil {
         if err == fs.ErrorDirNotFound {
@@ -389,30 +362,20 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
         return nil, err
     }
 
-    // Use preupload to find the ID
-    itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
-    if err != nil {
-        return nil, err
-    }
-    if itemMini == nil {
-        return nil, fs.ErrorObjectNotFound
-    }
-
-    // Now we have the ID we can look up the object proper
-    opts := rest.Opts{
-        Method:     "GET",
-        Path:       "/files/" + itemMini.ID,
-        Parameters: fieldsValue(),
-    }
-    var item api.Item
-    err = f.pacer.Call(func() (bool, error) {
-        resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
-        return shouldRetry(ctx, resp, err)
+    found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
+        if strings.EqualFold(item.Name, leaf) {
+            info = item
+            return true
+        }
+        return false
     })
     if err != nil {
         return nil, err
     }
-    return &item, nil
+    if !found {
+        return nil, fs.ErrorObjectNotFound
+    }
+    return info, nil
 }
 
 // errorHandler parses a non 2xx error response into an error
@@ -459,14 +422,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 
     ci := fs.GetConfig(ctx)
     f := &Fs{
         name:        name,
         root:        root,
         opt:         *opt,
         srv:         rest.NewClient(client).SetRoot(rootURL),
         pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
         uploadToken: pacer.NewTokenDispenser(ci.Transfers),
-        itemMetaCacheMu: new(sync.Mutex),
-        itemMetaCache:   make(map[string]ItemMeta),
     }
     f.features = (&fs.Features{
         CaseInsensitive: true,
@@ -479,11 +440,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
     }
 
-    // If using impersonate set an as-user header
-    if f.opt.Impersonate != "" {
-        f.srv.SetHeader("as-user", f.opt.Impersonate)
-    }
-
     jsonFile, ok := m.Get("box_config_file")
     boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 
@@ -726,17 +682,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
             }
             entries = append(entries, o)
         }
-
-        // Cache some metadata for this Item to help us process events later
-        // on. In particular, the box event API does not provide the old path
-        // of the Item when it is renamed/deleted/moved/etc.
-        f.itemMetaCacheMu.Lock()
-        cachedItemMeta, found := f.itemMetaCache[info.ID]
-        if !found || cachedItemMeta.SequenceID < info.SequenceID {
-            f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
-        }
-        f.itemMetaCacheMu.Unlock()
-
         return false
     })
     if err != nil {
@@ -772,7 +717,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 //
 // It returns "", nil if the file is good to go
 // It returns "ID", nil if the file must be updated
-func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
+func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
     check := api.PreUploadCheck{
         Name: f.opt.Enc.FromStandardName(leaf),
         Parent: api.Parent{
@@ -797,16 +742,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
             var conflict api.PreUploadCheckConflict
             err = json.Unmarshal(apiErr.ContextInfo, &conflict)
             if err != nil {
-                return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
+                return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
             }
             if conflict.Conflicts.Type != api.ItemTypeFile {
-                return nil, fs.ErrorIsDir
+                return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
             }
-            return &conflict.Conflicts, nil
+            return conflict.Conflicts.ID, nil
         }
-        return nil, fmt.Errorf("pre-upload check: %w", err)
+        return "", fmt.Errorf("pre-upload check: %w", err)
     }
-    return nil, nil
+    return "", nil
 }
 
 // Put the object
@@ -827,11 +772,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 
     // Preflight check the upload, which returns the ID if the
     // object already exists
-    item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+    ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
     if err != nil {
         return nil, err
     }
-    if item == nil {
+    if ID == "" {
         return f.PutUnchecked(ctx, in, src, options...)
     }
 
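Both sides of the Put hunks above make the same decision: run a preflight name check first, then either upload a brand-new object or update the object that already owns the name. A tiny standalone sketch of that flow (checkName, createObject and updateObject are placeholders invented for the example, not rclone functions):

```go
// Preflight-then-decide: create when the name is free, update when it exists.
package main

import "fmt"

var existing = map[string]string{"hello.txt": "id-42"}

// checkName reports the ID of an existing object with this name, or "" if free.
func checkName(name string) (string, error) {
	if id, ok := existing[name]; ok {
		return id, nil
	}
	return "", nil
}

func createObject(name string) error { fmt.Println("create", name); return nil }
func updateObject(id string) error   { fmt.Println("update", id); return nil }

func put(name string) error {
	id, err := checkName(name)
	if err != nil {
		return fmt.Errorf("preflight check failed: %w", err)
	}
	if id == "" {
		return createObject(name)
	}
	return updateObject(id)
}

func main() {
	_ = put("hello.txt")
	_ = put("new.txt")
}
```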
@@ -839,7 +784,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
     o := &Object{
         fs:     f,
         remote: remote,
-        id:     item.ID,
+        id:     ID,
     }
     return o, o.Update(ctx, in, src, options...)
 }
@@ -1176,7 +1121,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
     var (
-        deleteErrors       atomic.Uint64
+        deleteErrors       = int64(0)
         concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
         wg                 sync.WaitGroup
     )
@@ -1192,7 +1137,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
                 err := f.deletePermanently(ctx, item.Type, item.ID)
                 if err != nil {
                     fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
-                    deleteErrors.Add(1)
+                    atomic.AddInt64(&deleteErrors, 1)
                 }
             }()
         } else {
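The CleanUp hunks above swap a typed atomic.Uint64 error counter for a plain int64 updated with atomic.AddInt64; both count failures safely across the deleting goroutines. A short sketch showing the two styles side by side:

```go
// Typed atomic counter (Go 1.19+) versus atomic.AddInt64 on a plain int64.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var typed atomic.Uint64 // typed atomic, has Add and Load methods
	var plain int64         // older style, updated via the atomic package functions

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			typed.Add(1)
			atomic.AddInt64(&plain, 1)
		}()
	}
	wg.Wait()
	fmt.Println(typed.Load(), atomic.LoadInt64(&plain))
}
```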
@ -1201,283 +1146,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
if deleteErrors.Load() != 0 {
|
if deleteErrors != 0 {
|
||||||
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
|
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown shutdown the fs
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.tokenRenewer.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangeNotify calls the passed function with a path that has had changes.
|
|
||||||
// If the implementation uses polling, it should adhere to the given interval.
|
|
||||||
//
|
|
||||||
// Automatically restarts itself in case of unexpected behavior of the remote.
|
|
||||||
//
|
|
||||||
// Close the returned channel to stop being notified.
|
|
||||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
|
||||||
go func() {
|
|
||||||
// get the `stream_position` early so all changes from now on get processed
|
|
||||||
streamPosition, err := f.changeNotifyStreamPosition(ctx)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// box can send duplicate Event IDs. Use this map to track and filter
|
|
||||||
// the ones we've already processed.
|
|
||||||
processedEventIDs := make(map[string]time.Time)
|
|
||||||
|
|
||||||
var ticker *time.Ticker
|
|
||||||
var tickerC <-chan time.Time
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case pollInterval, ok := <-pollIntervalChan:
|
|
||||||
if !ok {
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
ticker, tickerC = nil, nil
|
|
||||||
}
|
|
||||||
if pollInterval != 0 {
|
|
||||||
ticker = time.NewTicker(pollInterval)
|
|
||||||
tickerC = ticker.C
|
|
||||||
}
|
|
||||||
case <-tickerC:
|
|
||||||
if streamPosition == "" {
|
|
||||||
streamPosition, err = f.changeNotifyStreamPosition(ctx)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Garbage collect EventIDs older than 1 minute
|
|
||||||
for eventID, timestamp := range processedEventIDs {
|
|
||||||
if time.Since(timestamp) > time.Minute {
|
|
||||||
delete(processedEventIDs, eventID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
|
|
||||||
if err != nil {
|
|
||||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/events",
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
opts.Parameters.Set("stream_position", "now")
|
|
||||||
opts.Parameters.Set("stream_type", "changes")
|
|
||||||
|
|
||||||
var result api.Events
|
|
||||||
var resp *http.Response
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return strconv.FormatInt(result.NextStreamPosition, 10), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempts to construct the full path for an object, given the ID of its
|
|
||||||
// parent directory and the name of the object.
|
|
||||||
//
|
|
||||||
// Can return "" if the parentID is not currently in the directory cache.
|
|
||||||
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
|
|
||||||
fullPath = ""
|
|
||||||
name := f.opt.Enc.ToStandardName(childName)
|
|
||||||
if parentID != "" {
|
|
||||||
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
|
|
||||||
if len(parentDir) > 0 {
|
|
||||||
fullPath = parentDir + "/" + name
|
|
||||||
} else {
|
|
||||||
fullPath = name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// No parent, this object is at the root
|
|
||||||
fullPath = name
|
|
||||||
}
|
|
||||||
return fullPath
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
|
|
||||||
nextStreamPosition = streamPosition
|
|
||||||
|
|
||||||
for {
|
|
||||||
limit := f.opt.ListChunk
|
|
||||||
|
|
||||||
// box only allows a max of 500 events
|
|
||||||
if limit > 500 {
|
|
||||||
limit = 500
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "GET",
|
|
||||||
Path: "/events",
|
|
||||||
Parameters: fieldsValue(),
|
|
||||||
}
|
|
||||||
opts.Parameters.Set("stream_position", nextStreamPosition)
|
|
||||||
opts.Parameters.Set("stream_type", "changes")
|
|
||||||
opts.Parameters.Set("limit", strconv.Itoa(limit))
|
|
||||||
|
|
||||||
var result api.Events
|
|
||||||
var resp *http.Response
|
|
||||||
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.ChunkSize != int64(len(result.Entries)) {
|
|
||||||
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
|
|
||||||
}
|
|
||||||
|
|
||||||
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
|
|
||||||
if result.ChunkSize == 0 {
|
|
||||||
return nextStreamPosition, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type pathToClear struct {
|
|
||||||
path string
|
|
||||||
entryType fs.EntryType
|
|
||||||
}
|
|
||||||
var pathsToClear []pathToClear
|
|
||||||
newEventIDs := 0
|
|
||||||
for _, entry := range result.Entries {
|
|
||||||
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
|
|
||||||
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
|
|
||||||
|
|
||||||
if entry.EventID == "" {
|
|
||||||
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := processedEventIDs[entry.EventID]; ok {
|
|
||||||
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
processedEventIDs[entry.EventID] = time.Now()
|
|
||||||
newEventIDs++
|
|
||||||
|
|
||||||
if entry.Source.ID == "" { // missing File or Folder ID
|
|
||||||
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
|
||||||
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only interested in event types that result in a file tree change
|
|
||||||
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
|
||||||
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
f.itemMetaCacheMu.Lock()
|
|
||||||
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
|
|
||||||
if cachedItemMetaFound {
|
|
||||||
if itemMeta.SequenceID >= entry.Source.SequenceID {
|
|
||||||
// Item in the cache has the same or newer SequenceID than
|
|
||||||
// this event. Ignore this event, it must be old.
|
|
||||||
f.itemMetaCacheMu.Unlock()
|
|
||||||
fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This event is newer. Delete its entry from the cache,
|
|
||||||
// we'll notify about its change below, then it's up to a
|
|
||||||
// future list operation to repopulate the cache.
|
|
||||||
delete(f.itemMetaCache, entry.Source.ID)
|
|
||||||
}
|
|
||||||
f.itemMetaCacheMu.Unlock()
|
|
||||||
|
|
||||||
entryType := fs.EntryDirectory
|
|
||||||
if entry.Source.Type == api.ItemTypeFile {
|
|
||||||
entryType = fs.EntryObject
|
|
||||||
}
|
|
||||||
|
|
||||||
// The box event only includes the new path for the object (e.g.
|
|
||||||
// the path after the object was moved). If there was an old path
|
|
||||||
// saved in our cache, it must be cleared.
|
|
||||||
if cachedItemMetaFound {
|
|
||||||
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
|
||||||
if path != "" {
|
|
||||||
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s old parent not cached", eventDetails)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is a directory, also delete it from the dir cache.
|
|
||||||
// This will effectively invalidate the item metadata cache
|
|
||||||
// entries for all descendents of this directory, since we
|
|
||||||
// will no longer be able to construct a full path for them.
|
|
||||||
// This is exactly what we want, since we don't want to notify
|
|
||||||
// on the paths of these descendents if one of their ancestors
|
|
||||||
// has been renamed/deleted.
|
|
||||||
if entry.Source.Type == api.ItemTypeFolder {
|
|
||||||
f.dirCache.FlushDir(path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the item is "active", then it is not trashed or deleted, so
|
|
||||||
// it potentially has a valid parent.
|
|
||||||
//
|
|
||||||
// Construct the new path of the object, based on the Parent ID
|
|
||||||
// and its name. If we get an empty result, it means we don't
|
|
||||||
// currently know about this object so notification is unnecessary.
|
|
||||||
if entry.Source.ItemStatus == api.ItemStatusActive {
|
|
||||||
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
|
||||||
if path != "" {
|
|
||||||
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
|
|
||||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "%s new parent not found", eventDetails)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// box can sometimes repeatedly return the same Event IDs within a
|
|
||||||
// short period of time. If it stops giving us new ones, treat it
|
|
||||||
// the same as if it returned us none at all.
|
|
||||||
if newEventIDs == 0 {
|
|
||||||
return nextStreamPosition, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
notifiedPaths := make(map[string]bool)
|
|
||||||
for _, p := range pathsToClear {
|
|
||||||
if _, ok := notifiedPaths[p.path]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
notifiedPaths[p.path] = true
|
|
||||||
notifyFunc(p.path, p.entryType)
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing as an
|
// DirCacheFlush resets the directory cache - used in testing as an
|
||||||
// optional interface
|
// optional interface
|
||||||
func (f *Fs) DirCacheFlush() {
|
func (f *Fs) DirCacheFlush() {
|
||||||
|
@ -1725,7 +1399,6 @@ var (
|
||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
_ fs.CleanUpper = (*Fs)(nil)
|
_ fs.CleanUpper = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|
23  backend/cache/cache.go (vendored)
|
@ -1,4 +1,5 @@
|
||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
|
// +build !plan9,!js
|
||||||
|
|
||||||
// Package cache implements a virtual provider to cache existing remotes.
|
// Package cache implements a virtual provider to cache existing remotes.
|
||||||
package cache
|
package cache
|
||||||
|
@ -409,16 +410,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||||
}
|
}
|
||||||
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
} else {
|
||||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||||
if err != nil {
|
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||||
decPass = opt.PlexPassword
|
if err != nil {
|
||||||
}
|
decPass = opt.PlexPassword
|
||||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
}
|
||||||
m.Set("plex_token", token)
|
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
||||||
})
|
m.Set("plex_token", token)
|
||||||
if err != nil {
|
})
|
||||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
94
backend/cache/cache_internal_test.go
vendored
94
backend/cache/cache_internal_test.go
vendored
|
@ -1,4 +1,5 @@
|
||||||
//go:build !plan9 && !js && !race
|
//go:build !plan9 && !js && !race
|
||||||
|
// +build !plan9,!js,!race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
@ -10,6 +11,7 @@ import (
|
||||||
goflag "flag"
|
goflag "flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
@ -28,11 +30,10 @@ import (
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/testy"
|
"github.com/rclone/rclone/fstest/testy"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/rclone/rclone/vfs/vfscommon"
|
"github.com/rclone/rclone/vfs/vfsflags"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
|
||||||
goflag.Parse()
|
goflag.Parse()
|
||||||
var rc int
|
var rc int
|
||||||
|
|
||||||
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
|
log.Printf("Running with the following params: \n remote: %v", remoteName)
|
||||||
runInstance = newRun()
|
runInstance = newRun()
|
||||||
rc = m.Run()
|
rc = m.Run()
|
||||||
os.Exit(rc)
|
os.Exit(rc)
|
||||||
|
@ -122,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
||||||
|
|
||||||
/* TODO: is this testing something?
|
/* TODO: is this testing something?
|
||||||
func TestInternalVfsCache(t *testing.T) {
|
func TestInternalVfsCache(t *testing.T) {
|
||||||
vfscommon.Opt.DirCacheTime = time.Second * 30
|
vfsflags.Opt.DirCacheTime = time.Second * 30
|
||||||
testSize := int64(524288000)
|
testSize := int64(524288000)
|
||||||
|
|
||||||
vfscommon.Opt.CacheMode = vfs.CacheModeWrites
|
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
|
||||||
id := "tiuufo"
|
id := "tiuufo"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
@ -337,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
||||||
|
|
||||||
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||||
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
||||||
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
|
vfsflags.Opt.DirCacheTime = time.Second
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
t.Skip("test skipped with crypt remote")
|
t.Skip("test skipped with crypt remote")
|
||||||
|
@ -367,7 +368,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||||
|
|
||||||
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||||
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
||||||
vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
|
vfsflags.Opt.DirCacheTime = time.Second
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
t.Skip("test skipped with crypt remote")
|
t.Skip("test skipped with crypt remote")
|
||||||
|
@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||||
// update in the wrapped fs
|
// update in the wrapped fs
|
||||||
originalSize, err := runInstance.size(t, rootFs, "data.bin")
|
originalSize, err := runInstance.size(t, rootFs, "data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
fs.Logf(nil, "original size: %v", originalSize)
|
log.Printf("original size: %v", originalSize)
|
||||||
|
|
||||||
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
|
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
expectedSize++ // FIXME newline gets in, likely test data issue
|
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
|
||||||
} else {
|
} else {
|
||||||
data2 = []byte("test content")
|
data2 = []byte("test content")
|
||||||
}
|
}
|
||||||
|
@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||||
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
|
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
 require.Equal(t, int64(len(data2)), o.Size())
-fs.Logf(nil, "updated size: %v", len(data2))
+log.Printf("updated size: %v", len(data2))

 // get a new instance from the cache
 if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 err = runInstance.retryBlock(func() error {
 li, err := runInstance.list(t, rootFs, "test")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 2 {
-fs.Logf(nil, "not expected listing /test: %v", li)
+log.Printf("not expected listing /test: %v", li)
 return fmt.Errorf("not expected listing /test: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/one")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 0 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
 return fmt.Errorf("not expected listing /test/one: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/second")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/second: %v", li)
+log.Printf("not expected listing /test/second: %v", li)
 return fmt.Errorf("not expected listing /test/second: %v", li)
 }
 if fi, ok := li[0].(os.FileInfo); ok {
 if fi.Name() != "data.bin" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
 return fmt.Errorf("not expected name: %v", fi.Name())
 }
 } else if di, ok := li[0].(fs.DirEntry); ok {
 if di.Remote() != "test/second/data.bin" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
 return fmt.Errorf("not expected remote: %v", di.Remote())
 }
 } else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
 return fmt.Errorf("unexpected listing: %v", li)
 }

-fs.Logf(nil, "complete listing: %v", li)
+log.Printf("complete listing: %v", li)
 return nil
 }, 12, time.Second*10)
 require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 err = runInstance.retryBlock(func() error {
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 if !found {
-fs.Logf(nil, "not found /test")
+log.Printf("not found /test")
 return fmt.Errorf("not found /test")
 }
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 if !found {
-fs.Logf(nil, "not found /test/one")
+log.Printf("not found /test/one")
 return fmt.Errorf("not found /test/one")
 }
 found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 if !found {
-fs.Logf(nil, "not found /test/one/test2")
+log.Printf("not found /test/one/test2")
 return fmt.Errorf("not found /test/one/test2")
 }
 li, err := runInstance.list(t, rootFs, "test/one")
 if err != nil {
-fs.Logf(nil, "err: %v", err)
+log.Printf("err: %v", err)
 return err
 }
 if len(li) != 1 {
-fs.Logf(nil, "not expected listing /test/one: %v", li)
+log.Printf("not expected listing /test/one: %v", li)
 return fmt.Errorf("not expected listing /test/one: %v", li)
 }
 if fi, ok := li[0].(os.FileInfo); ok {
 if fi.Name() != "test2" {
-fs.Logf(nil, "not expected name: %v", fi.Name())
+log.Printf("not expected name: %v", fi.Name())
 return fmt.Errorf("not expected name: %v", fi.Name())
 }
 } else if di, ok := li[0].(fs.DirEntry); ok {
 if di.Remote() != "test/one/test2" {
-fs.Logf(nil, "not expected remote: %v", di.Remote())
+log.Printf("not expected remote: %v", di.Remote())
 return fmt.Errorf("not expected remote: %v", di.Remote())
 }
 } else {
-fs.Logf(nil, "unexpected listing: %v", li)
+log.Printf("unexpected listing: %v", li)
 return fmt.Errorf("unexpected listing: %v", li)
 }
-fs.Logf(nil, "complete listing /test/one/test2")
+log.Printf("complete listing /test/one/test2")
 return nil
 }, 12, time.Second*10)
 require.NoError(t, err)
@@ -707,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 id := fmt.Sprintf("tieer%v", time.Now().Unix())
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
+vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
 rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -742,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+vfsflags.Opt.DirCacheTime = time.Second * 10

 id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {

 di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 1)

 time.Sleep(time.Second * 30)

 di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 1)

 di, err = runInstance.list(t, rootFs, "test/dir1")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 4)

 di, err = runInstance.list(t, rootFs, "test")
 require.NoError(t, err)
-fs.Logf(nil, "len: %v", len(di))
+log.Printf("len: %v", len(di))
 require.Len(t, di, 4)
 }

@@ -828,7 +829,7 @@ func newRun() *run {
 } else {
 r.tmpUploadDir = uploadDir
 }
-fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 return r
 }
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 fstest.Initialise()
 remoteExists := false
-for _, s := range config.GetRemotes() {
-if s.Name == remote {
+for _, s := range config.FileSections() {
+if s == remote {
 remoteExists = true
 }
 }
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 cacheRemote := remote
 if !remoteExists {
 localRemote := remote + "-local"
-config.FileSetValue(localRemote, "type", "local")
-config.FileSetValue(localRemote, "nounc", "true")
+config.FileSet(localRemote, "type", "local")
+config.FileSet(localRemote, "nounc", "true")
 m.Set("type", "cache")
 m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 } else {
-remoteType := config.GetValue(remote, "type")
+remoteType := config.FileGet(remote, "type")
 if remoteType == "" {
 t.Skipf("skipped due to invalid remote type for %v", remote)
 return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 m.Set("password", cryptPassword1)
 m.Set("password2", cryptPassword2)
 }
-remoteRemote := config.GetValue(remote, "remote")
+remoteRemote := config.FileGet(remote, "remote")
 if remoteRemote == "" {
 t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 return nil, nil
 }
 remoteRemoteParts := strings.Split(remoteRemote, ":")
 remoteWrapping := remoteRemoteParts[0]
-remoteType := config.GetValue(remoteWrapping, "type")
+remoteType := config.FileGet(remoteWrapping, "type")
 if remoteType != "cache" {
 t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 return nil, nil
@@ -934,7 +935,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 }

 if purge {
-_ = operations.Purge(context.Background(), f, "")
+_ = f.Features().Purge(context.Background(), "")
+require.NoError(t, err)
 }
 err = f.Mkdir(context.Background(), "")
 require.NoError(t, err)
@@ -947,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 }

 func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
-err := operations.Purge(context.Background(), f, "")
+err := f.Features().Purge(context.Background(), "")
 require.NoError(t, err)
 cfs, err := r.getCacheFs(f)
 require.NoError(t, err)
@@ -1191,7 +1193,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 if r.rootIsCrypt {
 denominator := int64(65536 + 16)
-size -= 32
+size = size - 32
 quotient := size / denominator
 remainder := size % denominator
 return (quotient*65536 + remainder - 16)
backend/cache/cache_test.go (vendored, 12 lines changed)
@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -15,11 +16,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestCache:",
 NilObject: (*cache.Object)(nil),
-UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
-UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
-UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
-SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
+UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 })
 }
backend/cache/cache_unsupported.go (vendored, 2 lines changed)
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
+// +build plan9 js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache
backend/cache/cache_upload_test.go
vendored
1
backend/cache/cache_upload_test.go
vendored
|
@ -1,4 +1,5 @@
|
||||||
//go:build !plan9 && !js && !race
|
//go:build !plan9 && !js && !race
|
||||||
|
// +build !plan9,!js,!race
|
||||||
|
|
||||||
package cache_test
|
package cache_test
|
||||||
|
|
||||||
|
|
1
backend/cache/directory.go
vendored
1
backend/cache/directory.go
vendored
|
@ -1,4 +1,5 @@
|
||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
|
13
backend/cache/handle.go
vendored
13
backend/cache/handle.go
vendored
|
@ -1,4 +1,5 @@
|
||||||
//go:build !plan9 && !js
|
//go:build !plan9 && !js
|
||||||
|
// +build !plan9,!js
|
||||||
|
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
|
@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
|
||||||
r.scaleWorkers(totalWorkers)
|
r.scaleWorkers(totalWorkers)
|
||||||
}
|
}
|
||||||
|
|
||||||
// scaleWorkers will increase the worker pool count by the provided amount
|
// scaleOutWorkers will increase the worker pool count by the provided amount
|
||||||
func (r *Handle) scaleWorkers(desired int) {
|
func (r *Handle) scaleWorkers(desired int) {
|
||||||
current := r.workers
|
current := r.workers
|
||||||
if current == desired {
|
if current == desired {
|
||||||
|
@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||||
|
|
||||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||||
chunkStart -= offset
|
chunkStart = chunkStart - offset
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
found := false
|
found := false
|
||||||
|
|
||||||
|
@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
|
||||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||||
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
|
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
||||||
}
|
}
|
||||||
r.queueOffset(chunkStart)
|
r.queueOffset(chunkStart)
|
||||||
|
|
||||||
|
@ -415,8 +416,10 @@ func (w *worker) run() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
} else {
|
||||||
continue
|
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||||
|
|
backend/cache/object.go (vendored, 1 line changed)
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

backend/cache/plex.go (vendored, 1 line changed)
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

backend/cache/storage_memory.go (vendored, 1 line changed)
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

backend/cache/storage_persistent.go (vendored, 1 line changed)
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

backend/cache/utils_test.go (vendored, 3 lines changed)
@@ -1,6 +1,3 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
-
 package cache

 import bolt "go.etcd.io/bbolt"
@@ -29,7 +29,6 @@ import (
 "github.com/rclone/rclone/fs/fspath"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/fs/operations"
-"github.com/rclone/rclone/lib/encoder"
 )

 // Chunker's composite files have one or more chunks
@@ -102,10 +101,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const (
-maxMetadataSize = 1023
-maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255

 // Current/highest supported metadata format.
 const metadataVersion = 2
@@ -308,6 +305,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 root: rpath,
 opt: *opt,
 }
+cache.PinUntilFinalized(f.base, f)
 f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

 if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
@@ -319,41 +317,26 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 // i.e. `rpath` does not exist in the wrapped remote, but chunker
 // detects a composite file because it finds the first chunk!
 // (yet can't satisfy fstest.CheckListing, will ignore)
-if err == nil && !f.useMeta {
+if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+_, testErr := cache.Get(ctx, baseName+firstChunkPath)
 if testErr == fs.ErrorIsFile {
-f.base = newBase
 err = testErr
 }
 }
-cache.PinUntilFinalized(f.base, f)
-
-// Correct root if definitely pointing to a file
-if err == fs.ErrorIsFile {
-f.root = path.Dir(f.root)
-if f.root == "." || f.root == "/" {
-f.root = ""
-}
-}

 // Note 1: the features here are ones we could support, and they are
 // ANDed with the ones from wrappedFs.
 // Note 2: features.Fill() points features.PutStream to our PutStream,
 // but features.Mask() will nullify it if wrappedFs does not have it.
 f.features = (&fs.Features{
 CaseInsensitive: true,
 DuplicateFiles: true,
 ReadMimeType: false, // Object.MimeType not supported
 WriteMimeType: true,
 BucketBased: true,
 CanHaveEmptyDirectories: true,
 ServerSideAcrossConfigs: true,
-ReadDirMetadata: true,
-WriteDirMetadata: true,
-WriteDirSetModTime: true,
-UserDirMetadata: true,
-DirModTimeUpdatesOnWrite: true,
 }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)

 f.features.Disable("ListR") // Recursive listing may cause chunker skip files
@@ -830,7 +813,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 }
 case fs.Directory:
 isSubdir[entry.Remote()] = true
-wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
+wrapDir := fs.NewDirCopy(ctx, entry)
+wrapDir.SetRemote(entry.Remote())
 tempEntries = append(tempEntries, wrapDir)
 default:
 if f.opt.FailHard {
@@ -963,11 +947,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 }
 if caseInsensitive {
 sameMain = strings.EqualFold(mainRemote, remote)
-if sameMain && f.base.Features().IsLocal {
-// on local, make sure the EqualFold still holds true when accounting for encoding.
-// sometimes paths with special characters will only normalize the same way in Standard Encoding.
-sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
-}
 } else {
 sameMain = mainRemote == remote
 }
@@ -981,13 +960,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 }
 continue
 }
-// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
 if err := o.addChunk(entry, chunkNo); err != nil {
 return nil, err
 }
 }

-if o.main == nil && len(o.chunks) == 0 {
+if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 // Scanning hasn't found data chunks with conforming names.
 if f.useMeta || quickScan {
 // Metadata is required but absent and there are no chunks.
@@ -1143,8 +1122,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 // put implements Put, PutStream, PutUnchecked, Update
 func (f *Fs) put(
 ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-basePut putFn, action string, target fs.Object,
-) (obj fs.Object, err error) {
+basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
 // Perform consistency checks
 if err := f.forbidChunk(src, remote); err != nil {
 return nil, fmt.Errorf("%s refused: %w", action, err)
@@ -1584,14 +1563,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 return f.base.Mkdir(ctx, dir)
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-if do := f.base.Features().MkdirMetadata; do != nil {
-return do(ctx, dir, metadata)
-}
-return nil, fs.ErrorNotImplemented
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -1909,14 +1880,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 return do(ctx, srcFs.base, srcRemote, dstRemote)
 }

-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-if do := f.base.Features().DirSetModTime; do != nil {
-return do(ctx, dir, modTime)
-}
-return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1965,7 +1928,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 return
 }
 wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
 if entryType == fs.EntryObject {
 mainPath, _, _, xactID := f.parseChunkName(path)
 metaXactID := ""
@@ -2577,8 +2540,6 @@ var (
 _ fs.Copier = (*Fs)(nil)
 _ fs.Mover = (*Fs)(nil)
 _ fs.DirMover = (*Fs)(nil)
-_ fs.DirSetModTimer = (*Fs)(nil)
-_ fs.MkdirMetadataer = (*Fs)(nil)
 _ fs.PutUncheckeder = (*Fs)(nil)
 _ fs.PutStreamer = (*Fs)(nil)
 _ fs.CleanUpper = (*Fs)(nil)
@@ -36,12 +36,10 @@ func TestIntegration(t *testing.T) {
 "GetTier",
 "SetTier",
 "Metadata",
-"SetMetadata",
 },
 UnimplementableFsMethods: []string{
 "PublicLink",
 "OpenWriterAt",
-"OpenChunkWriter",
 "MergeDirs",
 "DirCacheFlush",
 "UserInfo",
@@ -222,23 +222,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
 }
 // check features
 var features = (&fs.Features{
 CaseInsensitive: true,
 DuplicateFiles: false,
 ReadMimeType: true,
 WriteMimeType: true,
 CanHaveEmptyDirectories: true,
 BucketBased: true,
 SetTier: true,
 GetTier: true,
 ReadMetadata: true,
 WriteMetadata: true,
 UserMetadata: true,
-ReadDirMetadata: true,
-WriteDirMetadata: true,
-WriteDirSetModTime: true,
-UserDirMetadata: true,
-DirModTimeUpdatesOnWrite: true,
-PartialUploads: true,
+PartialUploads: true,
 }).Fill(ctx, f)
 canMove := true
 for _, u := range f.upstreams {
@@ -445,32 +440,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 return u.f.Mkdir(ctx, uRemote)
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-u, uRemote, err := f.findUpstream(dir)
-if err != nil {
-return nil, err
-}
-do := u.f.Features().MkdirMetadata
-if do == nil {
-return nil, fs.ErrorNotImplemented
-}
-newDir, err := do(ctx, uRemote, metadata)
-if err != nil {
-return nil, err
-}
-entries := fs.DirEntries{newDir}
-entries, err = u.wrapEntries(ctx, entries)
-if err != nil {
-return nil, err
-}
-newDir, ok := entries[0].(fs.Directory)
-if !ok {
-return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-}
-return newDir, nil
-}
-
 // purge the upstream or fallback to a slow way
 func (u *upstream) purge(ctx context.Context, dir string) (err error) {
 if do := u.f.Features().Purge; do != nil {
@@ -786,11 +755,12 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
 case fs.Object:
 entries[i] = u.newObject(x)
 case fs.Directory:
-newPath, err := u.pathAdjustment.do(x.Remote())
+newDir := fs.NewDirCopy(ctx, x)
+newPath, err := u.pathAdjustment.do(newDir.Remote())
 if err != nil {
 return nil, err
 }
-newDir := fs.NewDirWrapper(newPath, x)
+newDir.SetRemote(newPath)
 entries[i] = newDir
 default:
 return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -813,7 +783,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 if f.root == "" && dir == "" {
 entries = make(fs.DirEntries, 0, len(f.upstreams))
 for combineDir := range f.upstreams {
-d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
+d := fs.NewDir(combineDir, f.when)
 entries = append(entries, d)
 }
 return entries, nil
@@ -944,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 return do(ctx, uRemote, expire, unlink)
 }

-// PutUnchecked in to the remote path with the modTime given of the given size
+// Put in to the remote path with the modTime given of the given size
 //
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
@@ -995,22 +965,6 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 return do(ctx, uDirs)
 }

-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-u, uDir, err := f.findUpstream(dir)
-if err != nil {
-return err
-}
-if uDir == "" {
-fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
-return nil
-}
-if do := u.f.Features().DirSetModTime; do != nil {
-return do(ctx, uDir, modTime)
-}
-return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1119,17 +1073,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-do, ok := o.Object.(fs.SetMetadataer)
-if !ok {
-return fs.ErrorNotImplemented
-}
-return do.SetMetadata(ctx, metadata)
-}
-
 // SetTier performs changing storage tier of the Object if
 // multiple storage classes supported
 func (o *Object) SetTier(tier string) error {
@@ -1156,8 +1099,6 @@ var (
 _ fs.PublicLinker = (*Fs)(nil)
 _ fs.PutUncheckeder = (*Fs)(nil)
 _ fs.MergeDirser = (*Fs)(nil)
-_ fs.DirSetModTimer = (*Fs)(nil)
-_ fs.MkdirMetadataer = (*Fs)(nil)
 _ fs.CleanUpper = (*Fs)(nil)
 _ fs.OpenWriterAter = (*Fs)(nil)
 _ fs.FullObject = (*Object)(nil)
@@ -11,7 +11,7 @@ import (
 )

 var (
-unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
+unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
 unimplementableObjectMethods = []string{}
 )

@@ -14,7 +14,6 @@ import (
 "fmt"
 "io"
 "os"
-"path"
 "regexp"
 "strings"
 "time"
@@ -38,7 +37,6 @@ import (
 const (
 initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
 maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
-chunkStreams = 0 // Streams to use for reading

 bufferSize = 8388608
 heuristicBytes = 1048576
@@ -174,33 +172,21 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 opt: *opt,
 mode: compressionModeFromName(opt.CompressionMode),
 }
-// Correct root if definitely pointing to a file
-if err == fs.ErrorIsFile {
-f.root = path.Dir(f.root)
-if f.root == "." || f.root == "/" {
-f.root = ""
-}
-}
 // the features here are ones we could support, and they are
 // ANDed with the ones from wrappedFs
 f.features = (&fs.Features{
 CaseInsensitive: true,
 DuplicateFiles: false,
 ReadMimeType: false,
 WriteMimeType: false,
 GetTier: true,
 SetTier: true,
 BucketBased: true,
 CanHaveEmptyDirectories: true,
 ReadMetadata: true,
 WriteMetadata: true,
 UserMetadata: true,
-ReadDirMetadata: true,
-WriteDirMetadata: true,
-WriteDirSetModTime: true,
-UserDirMetadata: true,
-DirModTimeUpdatesOnWrite: true,
-PartialUploads: true,
+PartialUploads: true,
 }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 // We support reading MIME types no matter the wrapped fs
 f.features.ReadMimeType = true
@@ -271,16 +257,6 @@ func isMetadataFile(filename string) bool {
 return strings.HasSuffix(filename, metaFileExt)
 }

-// Checks whether a file is a metadata file and returns the original
-// file name and a flag indicating whether it was a metadata file or
-// not.
-func unwrapMetadataFile(filename string) (string, bool) {
-if !isMetadataFile(filename) {
-return "", false
-}
-return filename[:len(filename)-len(metaFileExt)], true
-}
-
 // makeDataName generates the file name for a data file with specified compression mode
 func makeDataName(remote string, size int64, mode int) (newRemote string) {
 if mode != Uncompressed {
@@ -456,7 +432,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 if err != nil {
 fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 }
-return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
+return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
 }
 return nil
 }
@@ -790,14 +766,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 return f.Fs.Mkdir(ctx, dir)
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-if do := f.Fs.Features().MkdirMetadata; do != nil {
-return do(ctx, dir, metadata)
-}
-return nil, fs.ErrorNotImplemented
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -941,14 +909,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 return do(ctx, srcFs.Fs, srcRemote, dstRemote)
 }

-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-if do := f.Fs.Features().DirSetModTime; do != nil {
-return do(ctx, dir, modTime)
-}
-return fs.ErrorNotImplemented
-}
-
 // CleanUp the trash in the Fs
 //
 // Implement this if you have a way of emptying the trash or
@@ -1019,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
 fs.Logf(f, "path %q entryType %d", path, entryType)
 var (
 wrappedPath string
-isMetadataFile bool
 )
 switch entryType {
 case fs.EntryDirectory:
@@ -1028,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 case fs.EntryObject:
 // Note: All we really need to do to monitor the object is to check whether the metadata changed,
 // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
-wrappedPath, isMetadataFile = unwrapMetadataFile(path)
-if !isMetadataFile {
-return
-}
+wrappedPath = makeMetadataName(path)
 default:
 fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
 return
@@ -1287,17 +1243,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-do, ok := o.Object.(fs.SetMetadataer)
-if !ok {
-return fs.ErrorNotImplemented
-}
-return do.SetMetadata(ctx, metadata)
-}
-
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1363,7 +1308,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 }
 }
 // Get a chunkedreader for the wrapped object
-chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
+chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
 // Get file handle
 var file io.Reader
 if offset != 0 {
@@ -1530,8 +1475,6 @@ var (
 _ fs.Copier = (*Fs)(nil)
 _ fs.Mover = (*Fs)(nil)
 _ fs.DirMover = (*Fs)(nil)
-_ fs.DirSetModTimer = (*Fs)(nil)
-_ fs.MkdirMetadataer = (*Fs)(nil)
 _ fs.PutStreamer = (*Fs)(nil)
 _ fs.CleanUpper = (*Fs)(nil)
 _ fs.UnWrapper = (*Fs)(nil)
@@ -14,26 +14,23 @@ import (
 "github.com/rclone/rclone/fstest/fstests"
 )

-var defaultOpt = fstests.Opt{
-RemoteName: "TestCompress:",
-NilObject: (*Object)(nil),
-UnimplementableFsMethods: []string{
-"OpenWriterAt",
-"OpenChunkWriter",
-"MergeDirs",
-"DirCacheFlush",
-"PutUnchecked",
-"PutStream",
-"UserInfo",
-"Disconnect",
-},
-TiersToTest: []string{"STANDARD", "STANDARD_IA"},
-UnimplementableObjectMethods: []string{},
-}
-
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-fstests.Run(t, &defaultOpt)
+opt := fstests.Opt{
+RemoteName: *fstest.RemoteName,
+NilObject: (*Object)(nil),
+UnimplementableFsMethods: []string{
+"OpenWriterAt",
+"MergeDirs",
+"DirCacheFlush",
+"PutUnchecked",
+"PutStream",
+"UserInfo",
+"Disconnect",
+},
+TiersToTest: []string{"STANDARD", "STANDARD_IA"},
+UnimplementableObjectMethods: []string{}}
+fstests.Run(t, &opt)
 }

 // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
 }
 tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
 name := "TestCompressGzip"
-opt := defaultOpt
-opt.RemoteName = name + ":"
-opt.ExtraConfig = []fstests.ExtraConfigItem{
-{Name: name, Key: "type", Value: "compress"},
-{Name: name, Key: "remote", Value: tempdir},
-{Name: name, Key: "compression_mode", Value: "gzip"},
-}
-opt.QuickTestOK = true
-fstests.Run(t, &opt)
+fstests.Run(t, &fstests.Opt{
+RemoteName: name + ":",
+NilObject: (*Object)(nil),
+UnimplementableFsMethods: []string{
+"OpenWriterAt",
+"MergeDirs",
+"DirCacheFlush",
+"PutUnchecked",
+"PutStream",
+"UserInfo",
+"Disconnect",
+},
+UnimplementableObjectMethods: []string{
+"GetTier",
+"SetTier",
+},
+ExtraConfig: []fstests.ExtraConfigItem{
+{Name: name, Key: "type", Value: "compress"},
+{Name: name, Key: "remote", Value: tempdir},
+{Name: name, Key: "compression_mode", Value: "gzip"},
+},
+QuickTestOK: true,
+})
 }

@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 for _, runeValue := range plaintext {
 dir += int(runeValue)
 }
-dir %= 256
+dir = dir % 256

 // We'll use this number to store in the result filename...
 var result bytes.Buffer
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 if pos >= 26 {
 pos -= 6
 }
-pos -= thisdir
+pos = pos - thisdir
 if pos < 0 {
 pos += 52
 }
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
 // Zero out the bad block and continue
 for i := range (*fh.buf)[:n] {
-fh.buf[i] = 0
+(*fh.buf)[i] = 0
 }
 }
 fh.bufIndex = 0
@ -130,16 +130,6 @@ trying to recover an encrypted file with errors and it is desired to
|
||||||
recover as much of the file as possible.`,
|
recover as much of the file as possible.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "strict_names",
|
|
||||||
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
|
|
||||||
|
|
||||||
(By default, rclone will just log a NOTICE and continue as normal.)
|
|
||||||
This can happen if encrypted and unencrypted files are stored in the same
|
|
||||||
directory (which is not recommended.) It may also indicate a more serious
|
|
||||||
problem that should be investigated.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "filename_encoding",
|
Name: "filename_encoding",
|
||||||
Help: `How to encode the encrypted filename to text string.
|
Help: `How to encode the encrypted filename to text string.
|
||||||
|
@ -263,34 +253,22 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
PartialUploads: true,
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
|
@ -309,7 +287,6 @@ type Options struct {
|
||||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||||
FilenameEncoding string `config:"filename_encoding"`
|
FilenameEncoding string `config:"filename_encoding"`
|
||||||
Suffix string `config:"suffix"`
|
Suffix string `config:"suffix"`
|
||||||
StrictNames bool `config:"strict_names"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
|
@@ -344,64 +321,45 @@ func (f *Fs) String() string {
 }

 // Encrypt an object file name to entries.
-func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
+func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 	remote := obj.Remote()
 	decryptedRemote, err := f.cipher.DecryptFileName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable file name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
+		return
 	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newObject(obj))
-	return nil
 }

 // Encrypt a directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
-		if f.opt.StrictNames {
-			return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
-		}
-		fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
-		return nil
+		fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
+		return
 	}
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newDir(ctx, dir))
-	return nil
 }

 // Encrypt some directory entries. This alters entries returning it as newEntries.
 func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 	newEntries = entries[:0] // in place filter
-	errors := 0
-	var firsterr error
 	for _, entry := range entries {
 		switch x := entry.(type) {
 		case fs.Object:
-			err = f.add(&newEntries, x)
+			f.add(&newEntries, x)
 		case fs.Directory:
-			err = f.addDir(ctx, &newEntries, x)
+			f.addDir(ctx, &newEntries, x)
 		default:
 			return nil, fmt.Errorf("unknown object type %T", entry)
 		}
-		if err != nil {
-			errors++
-			if firsterr == nil {
-				firsterr = err
-			}
-		}
-	}
-	if firsterr != nil {
-		return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
 	}
 	return newEntries, nil
 }
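The hunk above reverts listing to a skip-and-log policy: undecryptable names are dropped from the listing instead of failing the whole operation as the strict_names path did. A minimal standalone sketch of that skip-and-continue filter (illustrative only; the decryptName helper and the sample names are hypothetical, not rclone code):

```go
package main

import (
	"fmt"
	"strings"
)

// decryptName stands in for cipher.DecryptFileName in the sketch.
func decryptName(s string) (string, error) {
	if !strings.HasPrefix(s, "enc-") {
		return "", fmt.Errorf("not an encrypted name")
	}
	return strings.TrimPrefix(s, "enc-"), nil
}

func main() {
	entries := []string{"enc-a.txt", "garbage", "enc-b.txt"}
	kept := entries[:0] // in-place filter, like encryptEntries
	for _, e := range entries {
		name, err := decryptName(e)
		if err != nil {
			// skip instead of returning an error for the whole listing
			fmt.Printf("skipping undecryptable name %q: %v\n", e, err)
			continue
		}
		kept = append(kept, name)
	}
	fmt.Println(kept) // [a.txt b.txt]
}
```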
@@ -520,7 +478,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return nil, fmt.Errorf("corrupted on transfer: %v encrypted hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash)
+		return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 	}
 	fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }
@@ -555,37 +513,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }

-// MkdirMetadata makes the root directory of the Fs object
-func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
-	do := f.Fs.Features().MkdirMetadata
-	if do == nil {
-		return nil, fs.ErrorNotImplemented
-	}
-	newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
-	if err != nil {
-		return nil, err
-	}
-	var entries = make(fs.DirEntries, 0, 1)
-	err = f.addDir(ctx, &entries, newDir)
-	if err != nil {
-		return nil, err
-	}
-	newDir, ok := entries[0].(fs.Directory)
-	if !ok {
-		return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
-	}
-	return newDir, nil
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	do := f.Fs.Features().DirSetModTime
-	if do == nil {
-		return fs.ErrorNotImplemented
-	}
-	return do(ctx, f.cipher.EncryptDirName(dir), modTime)
-}
-
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
@@ -827,7 +754,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	}
 	out := make([]fs.Directory, len(dirs))
 	for i, dir := range dirs {
-		out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
+		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
 	}
 	return do(ctx, out)
 }
@@ -1063,14 +990,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // newDir returns a dir with the Name decrypted
 func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(ctx, dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
 		fs.Debugf(remote, "Undecryptable dir name: %v", err)
 	} else {
-		remote = decryptedRemote
+		newDir.SetRemote(decryptedRemote)
 	}
-	newDir := fs.NewDirWrapper(remote, dir)
 	return newDir
 }

@@ -1248,17 +1175,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
@@ -1284,8 +1200,6 @@ var (
 	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Wrapper         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirSetModTimer  = (*Fs)(nil)
-	_ fs.MkdirMetadataer = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
 	_ fs.ChangeNotifier  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: *fstest.RemoteName,
 		NilObject:  (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base64"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 			{Name: name, Key: "filename_encoding", Value: "base32768"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
 			{Name: name, Key: "no_data_encryption", Value: "true"},
 		},
 		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
 		QuickTestOK:                  true,
 	})
File diff suppressed because it is too large
@@ -524,49 +524,12 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	})
 }

-// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
-func (f *Fs) InternalTestQuery(t *testing.T) {
-	ctx := context.Background()
-	var err error
-	t.Run("BadQuery", func(t *testing.T) {
-		_, err = f.query(ctx, "this is a bad query")
-		require.Error(t, err)
-		assert.Contains(t, err.Error(), "failed to execute query")
-	})
-
-	t.Run("NoMatch", func(t *testing.T) {
-		results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
-		require.NoError(t, err)
-		assert.Len(t, results, 0)
-	})
-
-	t.Run("GoodQuery", func(t *testing.T) {
-		pathSegments := strings.Split(existingFile, "/")
-		var parent string
-		for _, item := range pathSegments {
-			// the file name contains ' characters which must be escaped
-			escapedItem := f.opt.Enc.FromStandardName(item)
-			escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
-			escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
-
-			results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
-			require.NoError(t, err)
-			require.True(t, len(results) > 0)
-			for _, result := range results {
-				assert.True(t, len(result.Id) > 0)
-				assert.Equal(t, result.Name, item)
-			}
-			parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
-		}
-	})
-}
-
 // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
 func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)

-	opt := &filter.Options{}
+	opt := &filter.Opt{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)
@@ -648,7 +611,6 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
 	t.Run("CopyID", f.InternalTestCopyID)
-	t.Run("Query", f.InternalTestQuery)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
@ -1,638 +0,0 @@
|
||||||
package drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/lib/errcount"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
drive "google.golang.org/api/drive/v3"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
// system metadata keys which this backend owns
|
|
||||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
|
||||||
"content-type": {
|
|
||||||
Help: "The MIME type of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "text/plain",
|
|
||||||
},
|
|
||||||
"mtime": {
|
|
||||||
Help: "Time of last modification with mS accuracy.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"btime": {
|
|
||||||
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"copy-requires-writer-permission": {
|
|
||||||
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
},
|
|
||||||
"writers-can-share": {
|
|
||||||
Help: "Whether users with only writer permission can modify the file's permissions. Not populated and ignored when setting for items in shared drives.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"viewed-by-me": {
|
|
||||||
Help: "Whether the file has been viewed by this user.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
ReadOnly: true,
|
|
||||||
},
|
|
||||||
"owner": {
|
|
||||||
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "user@example.com",
|
|
||||||
},
|
|
||||||
"permissions": {
|
|
||||||
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "{}",
|
|
||||||
},
|
|
||||||
"folder-color-rgb": {
|
|
||||||
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "881133",
|
|
||||||
},
|
|
||||||
"description": {
|
|
||||||
Help: "A short description of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "Contract for signing",
|
|
||||||
},
|
|
||||||
"starred": {
|
|
||||||
Help: "Whether the user has starred the file.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"labels": {
|
|
||||||
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "[]",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extra fields we need to fetch to implement the system metadata above
|
|
||||||
var metadataFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"copyRequiresWriterPermission",
|
|
||||||
"description",
|
|
||||||
"folderColorRgb",
|
|
||||||
"hasAugmentedPermissions",
|
|
||||||
"owners",
|
|
||||||
"permissionIds",
|
|
||||||
"permissions",
|
|
||||||
"properties",
|
|
||||||
"starred",
|
|
||||||
"viewedByMe",
|
|
||||||
"viewedByMeTime",
|
|
||||||
"writersCanShare",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// Fields we need to read from permissions
|
|
||||||
var permissionsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
"permissionDetails/*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getPermission returns permissions for the fileID and permissionID passed in
|
|
||||||
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
if useCache {
|
|
||||||
perm = f.permissions[permissionID]
|
|
||||||
if perm != nil {
|
|
||||||
return perm, false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "Fetching permission %q", permissionID)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
perm, err = f.svc.Permissions.Get(fileID, permissionID).
|
|
||||||
Fields(permissionsFields).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
|
|
||||||
|
|
||||||
cleanPermission(perm)
|
|
||||||
|
|
||||||
// cache the permission
|
|
||||||
f.permissions[permissionID] = perm
|
|
||||||
|
|
||||||
return perm, inherited, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the permissions on the info
|
|
||||||
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
|
|
||||||
errs := errcount.New()
|
|
||||||
for _, perm := range permissions {
|
|
||||||
if perm.Role == "owner" {
|
|
||||||
// ignore owner permissions - these are set with owner
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cleanPermissionForWrite(perm)
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err := f.svc.Permissions.Create(info.Id, perm).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
SendNotificationEmail(false).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
|
|
||||||
errs.Add(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = errs.Err("failed to set permission")
|
|
||||||
if err != nil {
|
|
||||||
err = fserrors.NoRetryError(err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean attributes from permissions which we can't write
|
|
||||||
func cleanPermissionForWrite(perm *drive.Permission) {
|
|
||||||
perm.Deleted = false
|
|
||||||
perm.DisplayName = ""
|
|
||||||
perm.Id = ""
|
|
||||||
perm.Kind = ""
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean and cache the permission if not already cached
|
|
||||||
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
cleanPermission(perm)
|
|
||||||
if _, found := f.permissions[perm.Id]; !found {
|
|
||||||
f.permissions[perm.Id] = perm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the permission
|
|
||||||
func cleanPermission(perm *drive.Permission) {
|
|
||||||
// DisplayName: Output only. The "pretty" name of the value of the
|
|
||||||
// permission. The following is a list of examples for each type of
|
|
||||||
// permission: * `user` - User's full name, as defined for their Google
|
|
||||||
// account, such as "Joe Smith." * `group` - Name of the Google Group,
|
|
||||||
// such as "The Company Administrators." * `domain` - String domain
|
|
||||||
// name, such as "thecompany.com." * `anyone` - No `displayName` is
|
|
||||||
// present.
|
|
||||||
perm.DisplayName = ""
|
|
||||||
|
|
||||||
// Kind: Output only. Identifies what kind of resource this is. Value:
|
|
||||||
// the fixed string "drive#permission".
|
|
||||||
perm.Kind = ""
|
|
||||||
|
|
||||||
// PermissionDetails: Output only. Details of whether the permissions on
|
|
||||||
// this shared drive item are inherited or directly on this item. This
|
|
||||||
// is an output-only field which is present only for shared drive items.
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
|
|
||||||
// PhotoLink: Output only. A link to the user's profile photo, if
|
|
||||||
// available.
|
|
||||||
perm.PhotoLink = ""
|
|
||||||
|
|
||||||
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
|
|
||||||
// `permissionDetails` instead.
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields we need to read from labels
|
|
||||||
var labelsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getLabels returns labels for the fileID passed in
|
|
||||||
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
|
|
||||||
fs.Debugf(f, "Fetching labels for %q", fileID)
|
|
||||||
listLabels := f.svc.Files.ListLabels(fileID).
|
|
||||||
Fields(labelsFields).
|
|
||||||
Context(ctx)
|
|
||||||
for {
|
|
||||||
var info *drive.LabelList
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
info, err = listLabels.Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
labels = append(labels, info.Labels...)
|
|
||||||
if info.NextPageToken == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
listLabels.PageToken(info.NextPageToken)
|
|
||||||
}
|
|
||||||
for _, label := range labels {
|
|
||||||
cleanLabel(label)
|
|
||||||
}
|
|
||||||
return labels, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the labels on the info
|
|
||||||
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
|
|
||||||
if len(labels) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
req := drive.ModifyLabelsRequest{}
|
|
||||||
for _, label := range labels {
|
|
||||||
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
|
|
||||||
FieldModifications: labelFieldsToFieldModifications(label.Fields),
|
|
||||||
LabelId: label.Id,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set labels: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert label fields into something which can set the fields
|
|
||||||
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
|
|
||||||
for id, field := range fields {
|
|
||||||
var emails []string
|
|
||||||
for _, user := range field.User {
|
|
||||||
emails = append(emails, user.EmailAddress)
|
|
||||||
}
|
|
||||||
out = append(out, &drive.LabelFieldModification{
|
|
||||||
// FieldId: The ID of the field to be modified.
|
|
||||||
FieldId: id,
|
|
||||||
|
|
||||||
// SetDateValues: Replaces the value of a dateString Field with these
|
|
||||||
// new values. The string must be in the RFC 3339 full-date format:
|
|
||||||
// YYYY-MM-DD.
|
|
||||||
SetDateValues: field.DateString,
|
|
||||||
|
|
||||||
// SetIntegerValues: Replaces the value of an `integer` field with these
|
|
||||||
// new values.
|
|
||||||
SetIntegerValues: field.Integer,
|
|
||||||
|
|
||||||
// SetSelectionValues: Replaces a `selection` field with these new
|
|
||||||
// values.
|
|
||||||
SetSelectionValues: field.Selection,
|
|
||||||
|
|
||||||
// SetTextValues: Sets the value of a `text` field.
|
|
||||||
SetTextValues: field.Text,
|
|
||||||
|
|
||||||
// SetUserValues: Replaces a `user` field with these new values. The
|
|
||||||
// values must be valid email addresses.
|
|
||||||
SetUserValues: emails,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the label
|
|
||||||
func cleanLabel(label *drive.Label) {
|
|
||||||
// Kind: This is always drive#label
|
|
||||||
label.Kind = ""
|
|
||||||
|
|
||||||
for name, field := range label.Fields {
|
|
||||||
// Kind: This is always drive#labelField.
|
|
||||||
field.Kind = ""
|
|
||||||
|
|
||||||
// Note the fields are copies so we need to write them
|
|
||||||
// back to the map
|
|
||||||
label.Fields[name] = field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the metadata from drive item
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
|
|
||||||
metadata := make(fs.Metadata, 16)
|
|
||||||
|
|
||||||
// Dump user metadata first as it overrides system metadata
|
|
||||||
for k, v := range info.Properties {
|
|
||||||
metadata[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
// System metadata
|
|
||||||
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
|
|
||||||
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
|
|
||||||
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
|
|
||||||
metadata["content-type"] = info.MimeType
|
|
||||||
|
|
||||||
// Owners: Output only. The owner of this file. Only certain legacy
|
|
||||||
// files may have more than one owner. This field isn't populated for
|
|
||||||
// items in shared drives.
|
|
||||||
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
|
|
||||||
user := info.Owners[0]
|
|
||||||
if len(info.Owners) > 1 {
|
|
||||||
fs.Logf(o, "Ignoring more than 1 owner")
|
|
||||||
}
|
|
||||||
if user != nil {
|
|
||||||
id := user.EmailAddress
|
|
||||||
if id == "" {
|
|
||||||
id = user.DisplayName
|
|
||||||
}
|
|
||||||
metadata["owner"] = id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
|
|
||||||
// We only write permissions out if they are not inherited.
|
|
||||||
//
|
|
||||||
// On My Drives permissions seem to be attached to every item
|
|
||||||
// so they will always be written out.
|
|
||||||
//
|
|
||||||
// On Shared Drives only non-inherited permissions will be
|
|
||||||
// written out.
|
|
||||||
|
|
||||||
// To read the inherited permissions flag will mean we need to
|
|
||||||
// read the permissions for each object and the cache will be
|
|
||||||
// useless. However shared drives don't return permissions
|
|
||||||
// only permissionIds so will need to fetch them for each
|
|
||||||
// object. We use HasAugmentedPermissions to see if there are
|
|
||||||
// special permissions before fetching them to save transactions.
|
|
||||||
|
|
||||||
// HasAugmentedPermissions: Output only. Whether there are permissions
|
|
||||||
// directly on this file. This field is only populated for items in
|
|
||||||
// shared drives.
|
|
||||||
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
|
|
||||||
// Don't process permissions if there aren't any specifically set
|
|
||||||
fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
|
|
||||||
info.Permissions = nil
|
|
||||||
info.PermissionIds = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PermissionIds: Output only. List of permission IDs for users with
|
|
||||||
// access to this file.
|
|
||||||
//
|
|
||||||
// Only process these if we have no Permissions
|
|
||||||
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
|
|
||||||
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
|
|
||||||
g, gCtx := errgroup.WithContext(ctx)
|
|
||||||
g.SetLimit(o.fs.ci.Checkers)
|
|
||||||
var mu sync.Mutex // protect the info.Permissions from concurrent writes
|
|
||||||
for _, permissionID := range info.PermissionIds {
|
|
||||||
permissionID := permissionID
|
|
||||||
g.Go(func() error {
|
|
||||||
// must fetch the team drive ones individually to check the inherited flag
|
|
||||||
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read permission: %w", err)
|
|
||||||
}
|
|
||||||
// Don't write inherited permissions out
|
|
||||||
if inherited {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Don't write owner role out - these are covered by the owner metadata
|
|
||||||
if perm.Role == "owner" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
info.Permissions = append(info.Permissions, perm)
|
|
||||||
mu.Unlock()
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = g.Wait()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Clean the fetched permissions
|
|
||||||
for _, perm := range info.Permissions {
|
|
||||||
o.fs.cleanAndCachePermission(perm)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Permissions: Output only. The full list of permissions for the file.
|
|
||||||
// This is only available if the requesting user can share the file. Not
|
|
||||||
// populated for items in shared drives.
|
|
||||||
if len(info.Permissions) > 0 {
|
|
||||||
buf, err := json.Marshal(info.Permissions)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal permissions: %w", err)
|
|
||||||
}
|
|
||||||
metadata["permissions"] = string(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Permission propagation
|
|
||||||
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
|
|
||||||
// Leads me to believe that in non shared drives, permissions
|
|
||||||
// are added to each item when you set permissions for a
|
|
||||||
// folder whereas in shared drives they are inherited and
|
|
||||||
// placed on the item directly.
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.FolderColorRgb != "" {
|
|
||||||
metadata["folder-color-rgb"] = info.FolderColorRgb
|
|
||||||
}
|
|
||||||
if info.Description != "" {
|
|
||||||
metadata["description"] = info.Description
|
|
||||||
}
|
|
||||||
metadata["starred"] = fmt.Sprint(info.Starred)
|
|
||||||
metadata["btime"] = info.CreatedTime
|
|
||||||
metadata["mtime"] = info.ModifiedTime
|
|
||||||
|
|
||||||
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
|
|
||||||
// FIXME would be really nice if we knew if files had labels
|
|
||||||
// before listing but we need to know all possible label IDs
|
|
||||||
// to get it in the listing.
|
|
||||||
|
|
||||||
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to fetch labels: %w", err)
|
|
||||||
}
|
|
||||||
buf, err := json.Marshal(labels)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal labels: %w", err)
|
|
||||||
}
|
|
||||||
metadata["labels"] = string(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
o.metadata = &metadata
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the owner on the info
|
|
||||||
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
|
|
||||||
perm := drive.Permission{
|
|
||||||
Role: "owner",
|
|
||||||
EmailAddress: owner,
|
|
||||||
// Type: The type of the grantee. Valid values are: * `user` * `group` *
|
|
||||||
// `domain` * `anyone` When creating a permission, if `type` is `user`
|
|
||||||
// or `group`, you must provide an `emailAddress` for the user or group.
|
|
||||||
// When `type` is `domain`, you must provide a `domain`. There isn't
|
|
||||||
// extra information required for an `anyone` type.
|
|
||||||
Type: "user",
|
|
||||||
}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Permissions.Create(info.Id, &perm).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
TransferOwnership(true).
|
|
||||||
// SendNotificationEmail(false). - required apparently!
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set owner: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call back to set metadata that can't be set on the upload/update
|
|
||||||
//
|
|
||||||
// The *drive.File passed in holds the current state of the drive.File
|
|
||||||
// and this should update it with any modifications.
|
|
||||||
type updateMetadataFn func(context.Context, *drive.File) error
|
|
||||||
|
|
||||||
// read the metadata from meta and write it into updateInfo
|
|
||||||
//
|
|
||||||
// update should be true if this is being used to create metadata for
|
|
||||||
// an update/PATCH call as the rules on what can be updated are
|
|
||||||
// slightly different there.
|
|
||||||
//
|
|
||||||
// It returns a callback which should be called to finish the updates
|
|
||||||
// after the data is uploaded.
|
|
||||||
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
|
|
||||||
callbackFns := []updateMetadataFn{}
|
|
||||||
callback = func(ctx context.Context, info *drive.File) error {
|
|
||||||
for _, fn := range callbackFns {
|
|
||||||
err := fn(ctx, info)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// merge metadata into request and user metadata
|
|
||||||
for k, v := range meta {
|
|
||||||
k, v := k, v
|
|
||||||
// parse a boolean from v and write into out
|
|
||||||
parseBool := func(out *bool) error {
|
|
||||||
b, err := strconv.ParseBool(v)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
|
|
||||||
}
|
|
||||||
*out = b
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch k {
|
|
||||||
case "copy-requires-writer-permission":
|
|
||||||
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "writers-can-share":
|
|
||||||
if !f.isTeamDrive {
|
|
||||||
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fs.Debugf(f, "Ignoring %s=%s as can't set on shared drives", k, v)
|
|
||||||
}
|
|
||||||
case "viewed-by-me":
|
|
||||||
// Can't write this
|
|
||||||
case "content-type":
|
|
||||||
updateInfo.MimeType = v
|
|
||||||
case "owner":
|
|
||||||
if !f.opt.MetadataOwner.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Can't set Owner on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
err := f.setOwner(ctx, info, v)
|
|
||||||
if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
|
|
||||||
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
case "permissions":
|
|
||||||
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var perms []*drive.Permission
|
|
||||||
err := json.Unmarshal([]byte(v), &perms)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
|
|
||||||
}
|
|
||||||
// Can't set Permissions on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
err := f.setPermissions(ctx, info, perms)
|
|
||||||
if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
|
|
||||||
// We've already logged the permissions errors individually here
|
|
||||||
fs.Debugf(f, "Ignoring error as failok is set: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
case "labels":
|
|
||||||
if !f.opt.MetadataLabels.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var labels []*drive.Label
|
|
||||||
err := json.Unmarshal([]byte(v), &labels)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
|
|
||||||
}
|
|
||||||
// Can't set Labels on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
err := f.setLabels(ctx, info, labels)
|
|
||||||
if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
|
|
||||||
fs.Errorf(f, "Ignoring error as failok is set: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
case "folder-color-rgb":
|
|
||||||
updateInfo.FolderColorRgb = v
|
|
||||||
case "description":
|
|
||||||
updateInfo.Description = v
|
|
||||||
case "starred":
|
|
||||||
if err := parseBool(&updateInfo.Starred); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "btime":
|
|
||||||
if update {
|
|
||||||
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
|
|
||||||
} else {
|
|
||||||
updateInfo.CreatedTime = v
|
|
||||||
}
|
|
||||||
case "mtime":
|
|
||||||
updateInfo.ModifiedTime = v
|
|
||||||
default:
|
|
||||||
if updateInfo.Properties == nil {
|
|
||||||
updateInfo.Properties = make(map[string]string, 1)
|
|
||||||
}
|
|
||||||
updateInfo.Properties[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return callback, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch metadata and update updateInfo if --metadata is in use
|
|
||||||
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
|
|
||||||
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
|
|
||||||
}
|
|
||||||
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
|
|
||||||
}
|
|
||||||
return callback, nil
|
|
||||||
}
|
|
|
@ -8,19 +8,121 @@ package dropbox
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/lib/atexit"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxBatchSize = 1000 // max size the batch can be
|
||||||
|
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
|
||||||
|
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
|
||||||
|
defaultBatchSizeAsync = 100 // default batch size if async
|
||||||
|
)
|
||||||
|
|
||||||
|
// batcher holds info about the current items waiting for upload
|
||||||
|
type batcher struct {
|
||||||
|
f *Fs // Fs this batch is part of
|
||||||
|
mode string // configured batch mode
|
||||||
|
size int // maximum size for batch
|
||||||
|
timeout time.Duration // idle timeout for batch
|
||||||
|
async bool // whether we are using async batching
|
||||||
|
in chan batcherRequest // incoming items to batch
|
||||||
|
closed chan struct{} // close to indicate batcher shut down
|
||||||
|
atexit atexit.FnHandle // atexit handle
|
||||||
|
shutOnce sync.Once // make sure we shutdown once only
|
||||||
|
wg sync.WaitGroup // wait for shutdown
|
||||||
|
}
|
||||||
|
|
||||||
|
// batcherRequest holds an incoming request with a place for a reply
|
||||||
|
type batcherRequest struct {
|
||||||
|
commitInfo *files.UploadSessionFinishArg
|
||||||
|
result chan<- batcherResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if batcherRequest is the quit request
|
||||||
|
func (br *batcherRequest) isQuit() bool {
|
||||||
|
return br.commitInfo == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send this to get the engine to quit
|
||||||
|
var quitRequest = batcherRequest{}
|
||||||
|
|
||||||
|
// batcherResponse holds a response to be delivered to clients waiting
|
||||||
|
// for a batch to complete.
|
||||||
|
type batcherResponse struct {
|
||||||
|
err error
|
||||||
|
entry *files.FileMetadata
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBatcher creates a new batcher structure
|
||||||
|
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
|
||||||
|
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
|
||||||
|
if size > maxBatchSize || size < 0 {
|
||||||
|
return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
async := false
|
||||||
|
|
||||||
|
switch mode {
|
||||||
|
case "sync":
|
||||||
|
if size <= 0 {
|
||||||
|
ci := fs.GetConfig(ctx)
|
||||||
|
size = ci.Transfers
|
||||||
|
}
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeoutSync
|
||||||
|
}
|
||||||
|
case "async":
|
||||||
|
if size <= 0 {
|
||||||
|
size = defaultBatchSizeAsync
|
||||||
|
}
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeoutAsync
|
||||||
|
}
|
||||||
|
async = true
|
||||||
|
case "off":
|
||||||
|
size = 0
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &batcher{
|
||||||
|
f: f,
|
||||||
|
mode: mode,
|
||||||
|
size: size,
|
||||||
|
timeout: timeout,
|
||||||
|
async: async,
|
||||||
|
in: make(chan batcherRequest, size),
|
||||||
|
closed: make(chan struct{}),
|
||||||
|
}
|
||||||
|
if b.Batching() {
|
||||||
|
b.atexit = atexit.Register(b.Shutdown)
|
||||||
|
b.wg.Add(1)
|
||||||
|
go b.commitLoop(context.Background())
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batching returns true if batching is active
|
||||||
|
func (b *batcher) Batching() bool {
|
||||||
|
return b.size > 0
|
||||||
|
}
|
||||||
|
|
||||||
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
// finishBatch commits the batch, returning a batch status to poll or maybe complete
|
||||||
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
|
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
|
||||||
var arg = &files.UploadSessionFinishBatchArg{
|
var arg = &files.UploadSessionFinishBatchArg{
|
||||||
Entries: items,
|
Entries: items,
|
||||||
}
|
}
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = b.f.pacer.Call(func() (bool, error) {
|
||||||
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
|
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
|
||||||
// If error is insufficient space then don't retry
|
// If error is insufficient space then don't retry
|
||||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||||
|
@ -37,10 +139,23 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
|
||||||
return complete, nil
|
return complete, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Called by the batcher to commit a batch
|
// commit a batch
|
||||||
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
|
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
|
||||||
|
// If commit fails then signal clients if sync
|
||||||
|
var signalled = b.async
|
||||||
|
defer func() {
|
||||||
|
if err != nil && !signalled {
|
||||||
|
// Signal to clients that there was an error
|
||||||
|
for _, result := range results {
|
||||||
|
result <- batcherResponse{err: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
|
||||||
|
fs.Debugf(b.f, "Committing %s", desc)
|
||||||
|
|
||||||
// finalise the batch getting either a result or a job id to poll
|
// finalise the batch getting either a result or a job id to poll
|
||||||
complete, err := f.finishBatch(ctx, items)
|
complete, err := b.finishBatch(ctx, items)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -51,13 +166,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
|
||||||
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format results for return
|
// Report results to clients
|
||||||
|
var (
|
||||||
|
errorTag = ""
|
||||||
|
errorCount = 0
|
||||||
|
)
|
||||||
for i := range results {
|
for i := range results {
|
||||||
item := entries[i]
|
item := entries[i]
|
||||||
|
resp := batcherResponse{}
|
||||||
if item.Tag == "success" {
|
if item.Tag == "success" {
|
||||||
results[i] = item.Success
|
resp.entry = item.Success
|
||||||
} else {
|
} else {
|
||||||
errorTag := item.Tag
|
errorCount++
|
||||||
|
errorTag = item.Tag
|
||||||
if item.Failure != nil {
|
if item.Failure != nil {
|
||||||
errorTag = item.Failure.Tag
|
errorTag = item.Failure.Tag
|
||||||
if item.Failure.LookupFailed != nil {
|
if item.Failure.LookupFailed != nil {
|
||||||
|
@ -70,9 +191,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
|
||||||
errorTag += "/" + item.Failure.PropertiesError.Tag
|
errorTag += "/" + item.Failure.PropertiesError.Tag
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
errors[i] = fmt.Errorf("upload failed: %s", errorTag)
|
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
|
||||||
|
}
|
||||||
|
if !b.async {
|
||||||
|
results[i] <- resp
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Show signalled so no need to report error to clients from now on
|
||||||
|
signalled = true
|
||||||
|
|
||||||
|
// Report an error if any failed in the batch
|
||||||
|
if errorTag != "" {
|
||||||
|
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.Debugf(b.f, "Committed %s", desc)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// commitLoop runs the commit engine in the background
|
||||||
|
func (b *batcher) commitLoop(ctx context.Context) {
|
||||||
|
var (
|
||||||
|
items []*files.UploadSessionFinishArg // current batch of uncommitted files
|
||||||
|
results []chan<- batcherResponse // current batch of clients awaiting results
|
||||||
|
idleTimer = time.NewTimer(b.timeout)
|
||||||
|
commit = func() {
|
||||||
|
err := b.commitBatch(ctx, items, results)
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
|
||||||
|
}
|
||||||
|
items, results = nil, nil
|
||||||
|
}
|
||||||
|
)
|
||||||
|
defer b.wg.Done()
|
||||||
|
defer idleTimer.Stop()
|
||||||
|
idleTimer.Stop()
|
||||||
|
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case req := <-b.in:
|
||||||
|
if req.isQuit() {
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
items = append(items, req.commitInfo)
|
||||||
|
results = append(results, req.result)
|
||||||
|
idleTimer.Stop()
|
||||||
|
if len(items) >= b.size {
|
||||||
|
commit()
|
||||||
|
} else {
|
||||||
|
idleTimer.Reset(b.timeout)
|
||||||
|
}
|
||||||
|
case <-idleTimer.C:
|
||||||
|
if len(items) > 0 {
|
||||||
|
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
|
||||||
|
commit()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// commit any remaining items
|
||||||
|
if len(items) > 0 {
|
||||||
|
commit()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown finishes any pending batches then shuts everything down
|
||||||
|
//
|
||||||
|
// Can be called from atexit handler
|
||||||
|
func (b *batcher) Shutdown() {
|
||||||
|
if !b.Batching() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
b.shutOnce.Do(func() {
|
||||||
|
atexit.Unregister(b.atexit)
|
||||||
|
fs.Infof(b.f, "Committing uploads - please wait...")
|
||||||
|
// show that batcher is shutting down
|
||||||
|
close(b.closed)
|
||||||
|
// quit the commitLoop by sending a quitRequest message
|
||||||
|
//
|
||||||
|
// Note that we don't close b.in because that will
|
||||||
|
// cause write to closed channel in Commit when we are
|
||||||
|
// exiting due to a signal.
|
||||||
|
b.in <- quitRequest
|
||||||
|
b.wg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit commits the file using a batch call, first adding it to the
|
||||||
|
// batch and then waiting for the batch to complete in a synchronous
|
||||||
|
// way if async is not set.
|
||||||
|
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
|
||||||
|
select {
|
||||||
|
case <-b.closed:
|
||||||
|
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
|
||||||
|
resp := make(chan batcherResponse, 1)
|
||||||
|
b.in <- batcherRequest{
|
||||||
|
commitInfo: commitInfo,
|
||||||
|
result: resp,
|
||||||
|
}
|
||||||
|
// If running async then don't wait for the result
|
||||||
|
if b.async {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
result := <-resp
|
||||||
|
return result.entry, result.err
|
||||||
|
}
|
||||||
|
|
|
@@ -47,7 +47,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
@@ -122,14 +121,6 @@ var (

 	// Errors
 	errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
-
-	// Configure the batcher
-	defaultBatcherOptions = batcher.Options{
-		MaxBatchSize:          1000,
-		DefaultTimeoutSync:    500 * time.Millisecond,
-		DefaultTimeoutAsync:   10 * time.Second,
-		DefaultBatchSizeAsync: 100,
-	}
 )

 // Gets an oauth config with the right scopes
@@ -161,7 +152,7 @@ func init() {
 			},
 		})
 	},
-	Options: append(append(oauthutil.SharedOptions, []fs.Option{{
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "chunk_size",
 		Help: fmt.Sprintf(`Upload chunk size (< %v).

@@ -216,12 +207,71 @@ are supported.

 Note that we don't unmount the shared folder afterwards so the
 --dropbox-shared-folders can be omitted after the first use of a particular
-shared folder.
-
-See also --dropbox-root-namespace for an alternative way to work with shared
-folders.`,
+shared folder.`,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name: "batch_mode",
+			Help: `Upload file batching sync|async|off.
+
+This sets the batch mode used by rclone.
+
+For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
+
+This has 3 possible values
+
+- off - no batching
+- sync - batch uploads and check completion (default)
+- async - batch upload and don't check completion
+
+Rclone will close any outstanding batches when it exits which may make
+a delay on quit.
+`,
+			Default:  "sync",
+			Advanced: true,
+		}, {
+			Name: "batch_size",
+			Help: `Max number of files in upload batch.
+
+This sets the batch size of files to upload. It has to be less than 1000.
+
+By default this is 0 which means rclone which calculate the batch size
+depending on the setting of batch_mode.
+
+- batch_mode: async - default batch_size is 100
+- batch_mode: sync - default batch_size is the same as --transfers
+- batch_mode: off - not in use
+
+Rclone will close any outstanding batches when it exits which may make
+a delay on quit.
+
+Setting this is a great idea if you are uploading lots of small files
+as it will make them a lot quicker. You can use --transfers 32 to
+maximise throughput.
+`,
+			Default:  0,
+			Advanced: true,
+		}, {
+			Name: "batch_timeout",
+			Help: `Max time to allow an idle upload batch before uploading.
+
+If an upload batch is idle for more than this long then it will be
+uploaded.
+
+The default for this is 0 which means rclone will choose a sensible
+default based on the batch_mode in use.
+
+- batch_mode: async - default batch_timeout is 10s
+- batch_mode: sync - default batch_timeout is 500ms
+- batch_mode: off - not in use
+`,
+			Default:  fs.Duration(0),
+			Advanced: true,
+		}, {
+			Name:     "batch_commit_timeout",
+			Help:     `Max time to wait for a batch to finish committing`,
+			Default:  fs.Duration(10 * time.Minute),
+			Advanced: true,
 		}, {
 			Name:    "pacer_min_sleep",
 			Default: defaultMinSleep,
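The batch_size and batch_timeout help text added above says their zero defaults are resolved from batch_mode. The following is a minimal, hypothetical sketch of that defaulting rule (resolveBatchDefaults and the transfers argument are assumptions for illustration, not the backend's real code):

package main

import (
	"fmt"
	"time"
)

// resolveBatchDefaults illustrates the defaulting described in the option
// help above: a batch_size or batch_timeout of 0 is replaced with a
// per-mode default. Hypothetical helper, not the backend's implementation.
func resolveBatchDefaults(mode string, size int, timeout time.Duration, transfers int) (int, time.Duration, error) {
	switch mode {
	case "sync":
		if size == 0 {
			size = transfers // default batch_size is the same as --transfers
		}
		if timeout == 0 {
			timeout = 500 * time.Millisecond
		}
	case "async":
		if size == 0 {
			size = 100
		}
		if timeout == 0 {
			timeout = 10 * time.Second
		}
	case "off":
		// batching not in use
	default:
		return 0, 0, fmt.Errorf("unknown batch_mode %q", mode)
	}
	return size, timeout, nil
}

func main() {
	size, timeout, _ := resolveBatchDefaults("async", 0, 0, 4)
	fmt.Println(size, timeout) // 100 10s
}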
@@ -240,28 +290,23 @@ folders.`,
 				encoder.EncodeDel |
 				encoder.EncodeRightSpace |
 				encoder.EncodeInvalidUtf8,
-		}, {
-			Name:     "root_namespace",
-			Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
-			Default:  "",
-			Advanced: true,
-		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
+		}}...),
 	})
 }

 // Options defines the configuration for this backend
 type Options struct {
 	ChunkSize          fs.SizeSuffix        `config:"chunk_size"`
 	Impersonate        string               `config:"impersonate"`
 	SharedFiles        bool                 `config:"shared_files"`
 	SharedFolders      bool                 `config:"shared_folders"`
 	BatchMode          string               `config:"batch_mode"`
 	BatchSize          int                  `config:"batch_size"`
 	BatchTimeout       fs.Duration          `config:"batch_timeout"`
+	BatchCommitTimeout fs.Duration          `config:"batch_commit_timeout"`
 	AsyncBatch         bool                 `config:"async_batch"`
 	PacerMinSleep      fs.Duration          `config:"pacer_min_sleep"`
 	Enc                encoder.MultiEncoder `config:"encoding"`
-	RootNsid           string               `config:"root_namespace"`
 }

 // Fs represents a remote dropbox server
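The `config:"..."` struct tags in the Options type above are what bind each registered option to its field when the backend reads its configuration. A small sketch of that binding with rclone's configmap/configstruct packages, assuming the rclone module is on the import path and using a hypothetical two-field struct:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// demoOptions is a hypothetical cut-down Options struct; the tags name the
// config keys exactly as the backend's Options type above does.
type demoOptions struct {
	BatchMode string `config:"batch_mode"`
	BatchSize int    `config:"batch_size"`
}

func main() {
	m := configmap.Simple{"batch_mode": "async", "batch_size": "100"}
	opt := new(demoOptions)
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *opt) // {BatchMode:async BatchSize:100}
}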
@@ -280,7 +325,7 @@ type Fs struct {
 	slashRootSlash string    // root with "/" prefix and postfix, lowercase
 	pacer          *fs.Pacer // To pace the API calls
 	ns             string    // The namespace we are using or "" for none
-	batcher        *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
+	batcher        *batcher  // batch builder
 }

 // Object describes a dropbox object
@@ -386,7 +431,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	oldToken = strings.TrimSpace(oldToken)
 	if ok && oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
-		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
+		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, fmt.Errorf("NewFS convert token: %w", err)
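The only change in the hunk above is how the legacy token is interpolated into the JSON blob: %q emits a quoted, escaped string, while "%s" pastes the raw value between quotes. The sketch below uses a deliberately awkward, hypothetical token value to show the difference; real Dropbox tokens normally contain neither quotes nor backslashes.

package main

import "fmt"

func main() {
	oldToken := `ab"cd\ef` // hypothetical token with characters that need escaping

	quoted := fmt.Sprintf(`{"access_token":%q}`, oldToken)
	raw := fmt.Sprintf(`{"access_token":"%s"}`, oldToken)

	fmt.Println(quoted) // {"access_token":"ab\"cd\\ef"} - valid JSON
	fmt.Println(raw)    // {"access_token":"ab"cd\ef"}   - broken JSON
}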
@@ -406,11 +451,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ci:    ci,
 		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
-	batcherOptions := defaultBatcherOptions
-	batcherOptions.Mode = f.opt.BatchMode
-	batcherOptions.Size = f.opt.BatchSize
-	batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
-	f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
+	f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
 	if err != nil {
 		return nil, err
 	}
@@ -437,15 +478,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		members := []*team.UserSelectorArg{&user}
 		args := team.NewMembersGetInfoArgs(members)

-		memberIDs, err := f.team.MembersGetInfo(args)
+		memberIds, err := f.team.MembersGetInfo(args)
 		if err != nil {
 			return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
 		}
-		if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
+		if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
 			return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
 		}

-		cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
+		cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
 	}

 	f.srv = files.New(cfg)
@@ -511,11 +552,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	f.features.Fill(ctx, f)

-	if f.opt.RootNsid != "" {
-		f.ns = f.opt.RootNsid
-		fs.Debugf(f, "Overriding root namespace to %q", f.ns)
-	} else if strings.HasPrefix(root, "/") {
-		// If root starts with / then use the actual root
+	// If root starts with / then use the actual root
+	if strings.HasPrefix(root, "/") {
 		var acc *users.FullAccount
 		err = f.pacer.Call(func() (bool, error) {
 			acc, err = f.users.GetCurrentAccount()
@@ -656,7 +694,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(ctx, remote, nil)
 }

-// listSharedFolders lists all available shared folders mounted and not mounted
+// listSharedFoldersApi lists all available shared folders mounted and not mounted
 // we'll need the id later so we have to return them in original format
 func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
 	started := false
@@ -958,7 +996,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 	if root == "/" {
 		return errors.New("can't remove root directory")
 	}
-	encRoot := f.opt.Enc.FromStandardPath(root)

 	if check {
 		// check directory exists
@@ -967,9 +1004,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 			return fmt.Errorf("Rmdir: %w", err)
 		}

+	root = f.opt.Enc.FromStandardPath(root)
 	// check directory empty
 	arg := files.ListFolderArg{
-		Path:      encRoot,
+		Path:      root,
 		Recursive: false,
 	}
 	if root == "/" {
@@ -990,7 +1028,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

 	// remove it
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
 		return shouldRetry(ctx, err)
 	})
 	return err
@@ -1243,21 +1281,18 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, err
 	}
 	var total uint64
-	used := q.Used
 	if q.Allocation != nil {
 		if q.Allocation.Individual != nil {
 			total += q.Allocation.Individual.Allocated
 		}
 		if q.Allocation.Team != nil {
 			total += q.Allocation.Team.Allocated
-			// Override used with Team.Used as this includes q.Used already
-			used = q.Allocation.Team.Used
 		}
 	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
-		Used:  fs.NewUsageValue(int64(used)),           // bytes in use
-		Free:  fs.NewUsageValue(int64(total - used)),   // bytes which can be uploaded before reaching the quota
+		Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
+		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
 	}
 	return usage, nil
 }
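The lines removed in the About hunk above compute a team-aware used figure (overriding the member's own q.Used with Team.Used), while the added lines report q.Used directly. A worked sketch with made-up numbers showing how the two choices change the reported free space:

package main

import "fmt"

func main() {
	// Hypothetical figures: a 100 GiB team allocation, 2 GiB used by this
	// member alone, 60 GiB used by the whole team.
	var total, memberUsed, teamUsed uint64 = 100 << 30, 2 << 30, 60 << 30

	// Added lines: report the member's own usage.
	fmt.Println("free vs member usage:", (total-memberUsed)>>30, "GiB") // 98 GiB

	// Removed lines: when a team allocation is present, override used with
	// Team.Used, since the shared quota is what actually limits uploads.
	used := memberUsed
	if teamUsed != 0 {
		used = teamUsed
	}
	fmt.Println("free vs team usage:  ", (total-used)>>30, "GiB") // 40 GiB
}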
@@ -1687,7 +1722,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	// If we are batching then we should have written all the data now
 	// store the commit info now for a batch commit
 	if o.fs.batcher.Batching() {
-		return o.fs.batcher.Commit(ctx, o.remote, args)
+		return o.fs.batcher.Commit(ctx, args)
 	}

 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
 	509, // Bandwidth Limit Exceeded
 }

-var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
+var errorRegex = regexp.MustCompile(`#\d{1,3}`)

 func parseFichierError(err error) int {
 	matches := errorRegex.FindStringSubmatch(err.Error())
 	if len(matches) == 0 {
 		return 0
 	}
-	code, err := strconv.Atoi(matches[1])
+	code, err := strconv.Atoi(matches[0])
 	if err != nil {
 		fs.Debugf(nil, "failed parsing fichier error: %v", err)
 		return 0
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374, 412: // Flood detected seems to be #412 now
+	case 374:
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
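Both versions of parseFichierError above scan the error text for a numeric 1Fichier error code; the difference is whether the regexp keeps a capture group. With `#(\d{1,3})`, FindStringSubmatch returns the digits in matches[1]; without the group only matches[0] (which still contains the leading '#') is available, and strconv.Atoi on it fails. A standalone sketch with a hypothetical error message:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	msg := "1Fichier error #374: flood detected" // hypothetical error text

	withGroup := regexp.MustCompile(`#(\d{1,3})`)
	m := withGroup.FindStringSubmatch(msg)
	code, err := strconv.Atoi(m[1]) // digits only
	fmt.Println(code, err)          // 374 <nil>

	noGroup := regexp.MustCompile(`#\d{1,3}`)
	m = noGroup.FindStringSubmatch(msg)
	code, err = strconv.Atoi(m[0]) // whole match, still contains '#'
	fmt.Println(code, err)         // 0 strconv.Atoi: parsing "#374": invalid syntax
}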
@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	srcFs := srcObj.fs

 	// Find current directory ID
-	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
+	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		return nil, err
 	}

 	// Create temporary object
-	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}

 	// If it is in the correct directory, just rename it
 	var url string
-	if srcDirectoryID == dstDirectoryID {
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			return src, nil
-		}
-		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
+	if currentDirectoryID == directoryID {
+		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		dstFolderID, err := strconv.Atoi(dstDirectoryID)
+		folderID, err := strconv.Atoi(directoryID)
 		if err != nil {
 			return nil, err
 		}
-		rename := dstLeaf
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			rename = ""
-		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
+		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
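The lines removed in the Move hunk above layer a small optimisation over the older code: when the destination leaf equals the source leaf there is nothing to rename, and a cross-directory move passes an empty rename. A toy sketch of that decision using hypothetical string arguments rather than the backend's real types:

package main

import (
	"fmt"
	"path"
)

// planMove sketches the decision made above: same directory and same leaf is
// a no-op; same directory with a new leaf is a rename; a different directory
// is a move, optionally with a rename. Hypothetical helper for illustration.
func planMove(srcDir, srcLeaf, dstDir, dstLeaf string) string {
	if srcDir == dstDir {
		if srcLeaf == dstLeaf {
			return "no-op"
		}
		return "rename to " + dstLeaf
	}
	rename := dstLeaf
	if srcLeaf == dstLeaf {
		rename = "" // move keeps the existing name
	}
	return fmt.Sprintf("move to %s (rename %q)", path.Join(dstDir, dstLeaf), rename)
}

func main() {
	fmt.Println(planMove("/a", "f.txt", "/a", "f.txt")) // no-op
	fmt.Println(planMove("/a", "f.txt", "/a", "g.txt")) // rename to g.txt
	fmt.Println(planMove("/a", "f.txt", "/b", "f.txt")) // move to /b/f.txt (rename "")
}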
@@ -158,9 +158,9 @@ type Fs struct {
 	tokenMu         sync.Mutex    // hold when reading the token
 	token           string        // current access token
 	tokenExpiry     time.Time     // time the current token expires
-	tokenExpired    atomic.Int32
+	tokenExpired    int32         // read and written with atomic
 	canCopyWithName bool          // set if detected that can use fi_name in copy
 	precision       time.Duration // precision reported
 }

 // Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
 	err = status // return the error from the RPC
 	code := status.GetCode()
 	if code == "login_token_expired" {
-		f.tokenExpired.Add(1)
+		atomic.AddInt32(&f.tokenExpired, 1)
 	} else {
 		for _, retryCode := range retryStatusCodes {
 			if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 	var refreshed = false
 	defer func() {
 		if refreshed {
-			f.tokenExpired.Store(0)
+			atomic.StoreInt32(&f.tokenExpired, 0)
 		}
 		f.tokenMu.Unlock()
 	}()

-	expired := f.tokenExpired.Load() != 0
+	expired := atomic.LoadInt32(&f.tokenExpired) != 0
 	if expired {
 		fs.Debugf(f, "Token invalid - refreshing")
 	}
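The two sides of this hunk are equivalent token-expiry flags: the removed lines use the atomic.Int32 type (Go 1.19+), the added lines use the older atomic.AddInt32/LoadInt32/StoreInt32 functions on a plain int32 field. A small standalone sketch showing both styles side by side:

package main

import (
	"fmt"
	"sync/atomic"
)

type withType struct {
	tokenExpired atomic.Int32 // Go 1.19+ typed atomic
}

type withFuncs struct {
	tokenExpired int32 // read and written with atomic functions
}

func main() {
	var a withType
	a.tokenExpired.Add(1)
	fmt.Println(a.tokenExpired.Load() != 0) // true
	a.tokenExpired.Store(0)

	var b withFuncs
	atomic.AddInt32(&b.tokenExpired, 1)
	fmt.Println(atomic.LoadInt32(&b.tokenExpired) != 0) // true
	atomic.StoreInt32(&b.tokenExpired, 0)
}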
@ -1,901 +0,0 @@
|
||||||
// Package filescom provides an interface to the Files.com
|
|
||||||
// object storage system.
|
|
||||||
package filescom
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
files_sdk "github.com/Files-com/files-sdk-go/v3"
|
|
||||||
"github.com/Files-com/files-sdk-go/v3/bundle"
|
|
||||||
"github.com/Files-com/files-sdk-go/v3/file"
|
|
||||||
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
|
|
||||||
"github.com/Files-com/files-sdk-go/v3/folder"
|
|
||||||
"github.com/Files-com/files-sdk-go/v3/session"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
Run of rclone info
|
|
||||||
stringNeedsEscaping = []rune{
|
|
||||||
'/', '\x00'
|
|
||||||
}
|
|
||||||
maxFileLength = 512 // for 1 byte unicode characters
|
|
||||||
maxFileLength = 512 // for 2 byte unicode characters
|
|
||||||
maxFileLength = 512 // for 3 byte unicode characters
|
|
||||||
maxFileLength = 512 // for 4 byte unicode characters
|
|
||||||
canWriteUnnormalized = true
|
|
||||||
canReadUnnormalized = true
|
|
||||||
canReadRenormalized = true
|
|
||||||
canStream = true
|
|
||||||
*/
|
|
||||||
|
|
||||||
const (
|
|
||||||
minSleep = 10 * time.Millisecond
|
|
||||||
maxSleep = 2 * time.Second
|
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
|
||||||
|
|
||||||
folderNotEmpty = "processing-failure/folder-not-empty"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Register with Fs
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "filescom",
|
|
||||||
Description: "Files.com",
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{
|
|
||||||
{
|
|
||||||
Name: "site",
|
|
||||||
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
|
|
||||||
}, {
|
|
||||||
Name: "username",
|
|
||||||
Help: "The username used to authenticate with Files.com.",
|
|
||||||
}, {
|
|
||||||
Name: "password",
|
|
||||||
Help: "The password used to authenticate with Files.com.",
|
|
||||||
IsPassword: true,
|
|
||||||
}, {
|
|
||||||
Name: "api_key",
|
|
||||||
Help: "The API key used to authenticate with Files.com.",
|
|
||||||
Advanced: true,
|
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Display |
|
|
||||||
encoder.EncodeBackSlash |
|
|
||||||
encoder.EncodeRightSpace |
|
|
||||||
encoder.EncodeRightCrLfHtVt |
|
|
||||||
encoder.EncodeInvalidUtf8),
|
|
||||||
}},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
Site string `config:"site"`
|
|
||||||
Username string `config:"username"`
|
|
||||||
Password string `config:"password"`
|
|
||||||
APIKey string `config:"api_key"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs represents a remote files.com server
|
|
||||||
type Fs struct {
|
|
||||||
name string // name of this remote
|
|
||||||
root string // the path we are working on
|
|
||||||
opt Options // parsed options
|
|
||||||
features *fs.Features // optional features
|
|
||||||
fileClient *file.Client // the connection to the file API
|
|
||||||
folderClient *folder.Client // the connection to the folder API
|
|
||||||
migrationClient *file_migration.Client // the connection to the file migration API
|
|
||||||
bundleClient *bundle.Client // the connection to the bundle API
|
|
||||||
pacer *fs.Pacer // pacer for API calls
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object describes a files object
|
|
||||||
//
|
|
||||||
// Will definitely have info but maybe not meta
|
|
||||||
type Object struct {
|
|
||||||
fs *Fs // what this object is part of
|
|
||||||
remote string // The remote path
|
|
||||||
size int64 // size of the object
|
|
||||||
crc32 string // CRC32 of the object content
|
|
||||||
md5 string // MD5 of the object content
|
|
||||||
mimeType string // Content-Type of the object
|
|
||||||
modTime time.Time // modification time of the object
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// String converts this Fs to a string
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("files root '%s'", f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode remote and turn it into an absolute path in the share
|
|
||||||
func (f *Fs) absPath(remote string) string {
|
|
||||||
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryErrorCodes is a slice of error codes that we will retry
|
|
||||||
var retryErrorCodes = []int{
|
|
||||||
429, // Too Many Requests.
|
|
||||||
500, // Internal Server Error
|
|
||||||
502, // Bad Gateway
|
|
||||||
503, // Service Unavailable
|
|
||||||
504, // Gateway Timeout
|
|
||||||
509, // Bandwidth Limit Exceeded
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
|
||||||
// retried. It returns the err as a convenience
|
|
||||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|
||||||
if fserrors.ContextError(ctx, &err) {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if apiErr, ok := err.(files_sdk.ResponseError); ok {
|
|
||||||
for _, e := range retryErrorCodes {
|
|
||||||
if apiErr.HttpCode == e {
|
|
||||||
fs.Debugf(nil, "Retrying API error %v", err)
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fserrors.ShouldRetry(err), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// readMetaDataForPath reads the metadata from the path
|
|
||||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
|
|
||||||
params := files_sdk.FileFindParams{
|
|
||||||
Path: f.absPath(path),
|
|
||||||
}
|
|
||||||
|
|
||||||
var file files_sdk.File
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, container:path
|
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|
||||||
// Parse config into Options struct
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(m, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
config, err := newClientConfig(ctx, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
opt: *opt,
|
|
||||||
fileClient: &file.Client{Config: config},
|
|
||||||
folderClient: &folder.Client{Config: config},
|
|
||||||
migrationClient: &file_migration.Client{Config: config},
|
|
||||||
bundleClient: &bundle.Client{Config: config},
|
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
CaseInsensitive: true,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
ReadMimeType: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
}).Fill(ctx, f)
|
|
||||||
|
|
||||||
if f.root != "" {
|
|
||||||
info, err := f.readMetaDataForPath(ctx, "")
|
|
||||||
if err == nil && !info.IsDir() {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return f, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
|
|
||||||
if opt.Site != "" {
|
|
||||||
if strings.Contains(opt.Site, ".") {
|
|
||||||
config.EndpointOverride = opt.Site
|
|
||||||
} else {
|
|
||||||
config.Subdomain = opt.Site
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = url.ParseRequestURI(config.Endpoint())
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
|
|
||||||
|
|
||||||
if opt.APIKey != "" {
|
|
||||||
config.APIKey = opt.APIKey
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.Username == "" {
|
|
||||||
err = errors.New("username not found")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if opt.Password == "" {
|
|
||||||
err = errors.New("password not found")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
opt.Password, err = obscure.Reveal(opt.Password)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
sessionClient := session.Client{Config: config}
|
|
||||||
params := files_sdk.SessionCreateParams{
|
|
||||||
Username: opt.Username,
|
|
||||||
Password: opt.Password,
|
|
||||||
}
|
|
||||||
|
|
||||||
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("couldn't create session: %w", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
config.SessionId = thisSession.Id
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return an Object from a path
|
|
||||||
//
|
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
|
||||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
if file != nil {
|
|
||||||
err = o.setMetaData(file)
|
|
||||||
} else {
|
|
||||||
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
return f.newObjectWithInfo(ctx, remote, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
var it *folder.Iter
|
|
||||||
params := files_sdk.FolderListForParams{
|
|
||||||
Path: f.absPath(dir),
|
|
||||||
}
|
|
||||||
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for it.Next() {
|
|
||||||
item := ptr(it.File())
|
|
||||||
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
|
|
||||||
remote = path.Join(dir, remote)
|
|
||||||
if remote == dir {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if item.IsDir() {
|
|
||||||
d := fs.NewDir(remote, item.ModTime())
|
|
||||||
entries = append(entries, d)
|
|
||||||
} else {
|
|
||||||
o, err := f.newObjectWithInfo(ctx, remote, item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
entries = append(entries, o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = it.Err()
|
|
||||||
if files_sdk.IsNotExist(err) {
|
|
||||||
return nil, fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates from the parameters passed in a half finished Object which
|
|
||||||
// must have setMetaData called on it
|
|
||||||
//
|
|
||||||
// Returns the object and error.
|
|
||||||
//
|
|
||||||
// Used to create new objects
|
|
||||||
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
|
|
||||||
// Create the directory for the object if it doesn't exist
|
|
||||||
err = f.mkParentDir(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Temporary Object under construction
|
|
||||||
o = &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put the object
|
|
||||||
//
|
|
||||||
// Copy the reader in to the new object which is returned.
|
|
||||||
//
|
|
||||||
// The new object may have been created if an error is returned
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
// Temporary Object under construction
|
|
||||||
fs := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: src.Remote(),
|
|
||||||
}
|
|
||||||
return fs, fs.Update(ctx, in, src, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
return f.Put(ctx, in, src, options...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) mkdir(ctx context.Context, path string) error {
|
|
||||||
if path == "" || path == "." {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
params := files_sdk.FolderCreateParams{
|
|
||||||
Path: path,
|
|
||||||
MkdirParents: ptr(true),
|
|
||||||
}
|
|
||||||
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if files_sdk.IsExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the parent directory of remote
|
|
||||||
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
|
|
||||||
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir creates the container if it doesn't exist
|
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
||||||
return f.mkdir(ctx, f.absPath(dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
o := Object{
|
|
||||||
fs: f,
|
|
||||||
remote: dir,
|
|
||||||
}
|
|
||||||
return o.SetModTime(ctx, modTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// purgeCheck removes the root directory, if check is set then it
|
|
||||||
// refuses to do so if it has anything in
|
|
||||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|
||||||
path := f.absPath(dir)
|
|
||||||
if path == "" || path == "." {
|
|
||||||
return errors.New("can't purge root directory")
|
|
||||||
}
|
|
||||||
|
|
||||||
params := files_sdk.FileDeleteParams{
|
|
||||||
Path: path,
|
|
||||||
Recursive: ptr(!check),
|
|
||||||
}
|
|
||||||
|
|
||||||
err := f.pacer.Call(func() (bool, error) {
|
|
||||||
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
|
||||||
// Allow for eventual consistency deletion of child objects.
|
|
||||||
if isFolderNotEmpty(err) {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
if files_sdk.IsNotExist(err) {
|
|
||||||
return fs.ErrorDirNotFound
|
|
||||||
} else if isFolderNotEmpty(err) {
|
|
||||||
return fs.ErrorDirectoryNotEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("rmdir failed: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir deletes the root folder
|
|
||||||
//
|
|
||||||
// Returns an error if it isn't empty
|
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
||||||
return f.purgeCheck(ctx, dir, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision return the precision of this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
|
||||||
//
|
|
||||||
// This is stored with the remote path given.
|
|
||||||
//
|
|
||||||
// It returns the destination Object and a possible error.
|
|
||||||
//
|
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
|
||||||
//
|
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
|
|
||||||
srcObj, ok := src.(*Object)
|
|
||||||
if !ok {
|
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
|
||||||
return nil, fs.ErrorCantCopy
|
|
||||||
}
|
|
||||||
err = srcObj.readMetaData(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
srcPath := srcObj.fs.absPath(srcObj.remote)
|
|
||||||
dstPath := f.absPath(remote)
|
|
||||||
if strings.EqualFold(srcPath, dstPath) {
|
|
||||||
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create temporary object
|
|
||||||
dstObj, err = f.createObject(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy the object
|
|
||||||
params := files_sdk.FileCopyParams{
|
|
||||||
Path: srcPath,
|
|
||||||
Destination: dstPath,
|
|
||||||
Overwrite: ptr(true),
|
|
||||||
}
|
|
||||||
|
|
||||||
var action files_sdk.FileAction
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = f.waitForAction(ctx, action, "copy")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = dstObj.SetModTime(ctx, srcObj.modTime)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge deletes all the files and the container
|
|
||||||
//
|
|
||||||
// Optional interface: Only implement this if you have a way of
|
|
||||||
// deleting all the files quicker than just running Remove() on the
|
|
||||||
// result of List()
|
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|
||||||
return f.purgeCheck(ctx, dir, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// move a file or folder
|
|
||||||
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
|
|
||||||
// Move the object
|
|
||||||
params := files_sdk.FileMoveParams{
|
|
||||||
Path: src.absPath(srcRemote),
|
|
||||||
Destination: f.absPath(dstRemote),
|
|
||||||
}
|
|
||||||
|
|
||||||
var action files_sdk.FileAction
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = f.waitForAction(ctx, action, "move")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err = f.readMetaDataForPath(ctx, dstRemote)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
|
|
||||||
var migration files_sdk.FileMigration
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
|
|
||||||
// noop
|
|
||||||
}, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err == nil && migration.Status != "completed" {
|
|
||||||
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
|
||||||
//
|
|
||||||
// This is stored with the remote path given.
|
|
||||||
//
|
|
||||||
// It returns the destination Object and a possible error.
|
|
||||||
//
|
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
|
||||||
//
|
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
|
||||||
srcObj, ok := src.(*Object)
|
|
||||||
if !ok {
|
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
|
||||||
return nil, fs.ErrorCantMove
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create temporary object
|
|
||||||
dstObj, err := f.createObject(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do the move
|
|
||||||
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = dstObj.setMetaData(info)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return dstObj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
|
||||||
// using server-side move operations.
|
|
||||||
//
|
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
|
||||||
//
|
|
||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
|
||||||
//
|
|
||||||
// If destination exists then return fs.ErrorDirExists
|
|
||||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|
||||||
srcFs, ok := src.(*Fs)
|
|
||||||
if !ok {
|
|
||||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
|
||||||
return fs.ErrorCantDirMove
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if destination exists
|
|
||||||
_, err = f.readMetaDataForPath(ctx, dstRemote)
|
|
||||||
if err == nil {
|
|
||||||
return fs.ErrorDirExists
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create temporary object
|
|
||||||
dstObj, err := f.createObject(ctx, dstRemote)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do the move
|
|
||||||
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
|
||||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
|
|
||||||
params := files_sdk.BundleCreateParams{
|
|
||||||
Paths: []string{f.absPath(remote)},
|
|
||||||
}
|
|
||||||
if expire < fs.DurationOff {
|
|
||||||
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
|
|
||||||
}
|
|
||||||
|
|
||||||
var bundle files_sdk.Bundle
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
url = bundle.Url
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.NewHashSet(hash.CRC32, hash.MD5)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
|
||||||
|
|
||||||
// Fs returns the parent Fs
|
|
||||||
func (o *Object) Fs() fs.Info {
|
|
||||||
return o.fs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return a string version
|
|
||||||
func (o *Object) String() string {
|
|
||||||
if o == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote returns the remote path
|
|
||||||
func (o *Object) Remote() string {
|
|
||||||
return o.remote
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
|
||||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|
||||||
switch t {
|
|
||||||
case hash.CRC32:
|
|
||||||
if o.crc32 == "" {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%08s", o.crc32), nil
|
|
||||||
case hash.MD5:
|
|
||||||
return o.md5, nil
|
|
||||||
}
|
|
||||||
return "", hash.ErrUnsupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of an object in bytes
|
|
||||||
func (o *Object) Size() int64 {
|
|
||||||
return o.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// setMetaData sets the metadata from info
|
|
||||||
func (o *Object) setMetaData(file *files_sdk.File) error {
|
|
||||||
o.modTime = file.ModTime()
|
|
||||||
|
|
||||||
if !file.IsDir() {
|
|
||||||
o.size = file.Size
|
|
||||||
o.crc32 = file.Crc32
|
|
||||||
o.md5 = file.Md5
|
|
||||||
o.mimeType = file.MimeType
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readMetaData gets the metadata if it hasn't already been fetched
|
|
||||||
//
|
|
||||||
// it also sets the info
|
|
||||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|
||||||
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
|
||||||
if err != nil {
|
|
||||||
if files_sdk.IsNotExist(err) {
|
|
||||||
return fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if file.IsDir() {
|
|
||||||
return fs.ErrorIsDir
|
|
||||||
}
|
|
||||||
return o.setMetaData(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime returns the modification time of the object
|
|
||||||
//
|
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
|
||||||
// LastModified returned in the http headers
|
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
||||||
return o.modTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
|
||||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
|
||||||
params := files_sdk.FileUpdateParams{
|
|
||||||
Path: o.fs.absPath(o.remote),
|
|
||||||
ProvidedMtime: &modTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file files_sdk.File
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return o.setMetaData(&file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Storable returns a boolean showing whether this object storable
|
|
||||||
func (o *Object) Storable() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open an object for read
|
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|
||||||
// Offset and Count for range download
|
|
||||||
var offset, count int64
|
|
||||||
fs.FixRangeOption(options, o.size)
|
|
||||||
for _, option := range options {
|
|
||||||
switch x := option.(type) {
|
|
||||||
case *fs.RangeOption:
|
|
||||||
offset, count = x.Decode(o.size)
|
|
||||||
if count < 0 {
|
|
||||||
count = o.size - offset
|
|
||||||
}
|
|
||||||
case *fs.SeekOption:
|
|
||||||
offset = x.Offset
|
|
||||||
count = o.size - offset
|
|
||||||
default:
|
|
||||||
if option.Mandatory() {
|
|
||||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
params := files_sdk.FileDownloadParams{
|
|
||||||
Path: o.fs.absPath(o.remote),
|
|
||||||
}
|
|
||||||
|
|
||||||
headers := &http.Header{}
|
|
||||||
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = o.fs.fileClient.Download(
|
|
||||||
params,
|
|
||||||
files_sdk.WithContext(ctx),
|
|
||||||
files_sdk.RequestHeadersOption(headers),
|
|
||||||
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
|
||||||
in = closer
|
|
||||||
return err
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a pointer to t - useful for returning pointers to constants
|
|
||||||
func ptr[T any](t T) *T {
|
|
||||||
return &t
|
|
||||||
}
|
|
||||||
|
|
||||||
func isFolderNotEmpty(err error) bool {
|
|
||||||
var re files_sdk.ResponseError
|
|
||||||
ok := errors.As(err, &re)
|
|
||||||
return ok && re.Type == folderNotEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the object with the contents of the io.Reader, modTime and size
|
|
||||||
//
|
|
||||||
// If existing is set then it updates the object rather than creating a new one.
|
|
||||||
//
|
|
||||||
// The new object may have been created if an error is returned.
|
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
|
||||||
uploadOpts := []file.UploadOption{
|
|
||||||
file.UploadWithContext(ctx),
|
|
||||||
file.UploadWithReader(in),
|
|
||||||
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
|
||||||
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
|
||||||
}
|
|
||||||
|
|
||||||
err := o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
err := o.fs.fileClient.Upload(uploadOpts...)
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.readMetaData(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove an object
|
|
||||||
func (o *Object) Remove(ctx context.Context) error {
|
|
||||||
params := files_sdk.FileDeleteParams{
|
|
||||||
Path: o.fs.absPath(o.remote),
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.fs.pacer.Call(func() (bool, error) {
|
|
||||||
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
|
||||||
return shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
|
||||||
func (o *Object) MimeType(ctx context.Context) string {
|
|
||||||
return o.mimeType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
|
||||||
var (
|
|
||||||
_ fs.Fs = (*Fs)(nil)
|
|
||||||
_ fs.Purger = (*Fs)(nil)
|
|
||||||
_ fs.PutStreamer = (*Fs)(nil)
|
|
||||||
_ fs.Copier = (*Fs)(nil)
|
|
||||||
_ fs.Mover = (*Fs)(nil)
|
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
|
||||||
_ fs.Object = (*Object)(nil)
|
|
||||||
_ fs.MimeTyper = (*Object)(nil)
|
|
||||||
)
|
|
|
@@ -1,17 +0,0 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
File diff suppressed because it is too large
@@ -1,16 +0,0 @@
package frostfs

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFrostFS:",
		NilObject:       (*Object)(nil),
		SkipInvalidUTF8: true,
	})
}
@ -1,326 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
resolver "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
|
||||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
|
|
||||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type endpointInfo struct {
|
|
||||||
Address string
|
|
||||||
Priority int
|
|
||||||
Weight float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func publicReadWriteCCPRules() []chain.Rule {
|
|
||||||
return []chain.Rule{
|
|
||||||
{
|
|
||||||
Status: chain.Allow, Actions: chain.Actions{
|
|
||||||
Inverted: false,
|
|
||||||
Names: []string{
|
|
||||||
native.MethodPutObject,
|
|
||||||
native.MethodGetObject,
|
|
||||||
native.MethodHeadObject,
|
|
||||||
native.MethodDeleteObject,
|
|
||||||
native.MethodSearchObject,
|
|
||||||
native.MethodRangeObject,
|
|
||||||
native.MethodHashObject,
|
|
||||||
native.MethodPatchObject,
|
|
||||||
},
|
|
||||||
}, Resources: chain.Resources{
|
|
||||||
Inverted: false,
|
|
||||||
Names: []string{native.ResourceFormatRootObjects},
|
|
||||||
}, Any: false},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func privateCCPRules() []chain.Rule {
|
|
||||||
rule := publicReadWriteCCPRules()
|
|
||||||
// The same as public-read-write, except that only the owner is allowed to perform the listed actions
|
|
||||||
rule[0].Condition = []chain.Condition{
|
|
||||||
{
|
|
||||||
Op: chain.CondStringEquals,
|
|
||||||
Kind: chain.KindRequest,
|
|
||||||
Key: native.PropertyKeyActorRole,
|
|
||||||
Value: native.PropertyValueContainerRoleOwner,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
|
|
||||||
func publicReadCCPRules() []chain.Rule {
|
|
||||||
rule := privateCCPRules()
|
|
||||||
// Add a rule that allows other users to perform reading actions.
|
|
||||||
rule = append(rule, chain.Rule{
|
|
||||||
Status: chain.Allow, Actions: chain.Actions{
|
|
||||||
Inverted: false,
|
|
||||||
Names: []string{
|
|
||||||
native.MethodGetObject,
|
|
||||||
native.MethodHeadObject,
|
|
||||||
native.MethodRangeObject,
|
|
||||||
native.MethodHashObject,
|
|
||||||
native.MethodSearchObject,
|
|
||||||
},
|
|
||||||
}, Resources: chain.Resources{
|
|
||||||
Inverted: false,
|
|
||||||
Names: []string{native.ResourceFormatRootObjects},
|
|
||||||
}, Condition: []chain.Condition{
|
|
||||||
{
|
|
||||||
Op: chain.CondStringEquals,
|
|
||||||
Kind: chain.KindRequest,
|
|
||||||
Key: native.PropertyKeyActorRole,
|
|
||||||
Value: native.PropertyValueContainerRoleOthers,
|
|
||||||
},
|
|
||||||
}, Any: false})
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseContainerCreationPolicyString(policyString string) ([]chain.Rule, error) {
|
|
||||||
switch policyString {
|
|
||||||
case "private":
|
|
||||||
return privateCCPRules(), nil
|
|
||||||
case "public-read":
|
|
||||||
return publicReadCCPRules(), nil
|
|
||||||
case "public-read-write":
|
|
||||||
return publicReadWriteCCPRules(), nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("invalid container creation policy: %s", policyString)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseEndpoints(endpointParam string) ([]endpointInfo, error) {
    var err error
    expectedLength := -1 // to make sure all endpoints have the same format

    endpoints := strings.Split(strings.TrimSpace(endpointParam), " ")
    res := make([]endpointInfo, 0, len(endpoints))
    seen := make(map[string]struct{}, len(endpoints))

    for _, endpoint := range endpoints {
        endpointInfoSplit := strings.Split(endpoint, ",")
        address := endpointInfoSplit[0]

        if len(address) == 0 {
            continue
        }
        if _, ok := seen[address]; ok {
            return nil, fmt.Errorf("endpoint '%s' is already defined", address)
        }
        seen[address] = struct{}{}

        epInfo := endpointInfo{
            Address:  address,
            Priority: 1,
            Weight:   1,
        }

        if expectedLength == -1 {
            expectedLength = len(endpointInfoSplit)
        }

        if len(endpointInfoSplit) != expectedLength {
            return nil, fmt.Errorf("all endpoints must have the same format: '%s'", endpointParam)
        }

        switch len(endpointInfoSplit) {
        case 1:
        case 2:
            epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
            if err != nil {
                return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
            }
        case 3:
            epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
            if err != nil {
                return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
            }

            epInfo.Weight, err = parseWeight(endpointInfoSplit[2])
            if err != nil {
                return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
            }
        default:
            return nil, fmt.Errorf("invalid endpoint format '%s'", endpoint)
        }

        res = append(res, epInfo)
    }

    return res, nil
}

func parsePriority(priorityStr string) (int, error) {
    priority, err := strconv.Atoi(priorityStr)
    if err != nil {
        return 0, fmt.Errorf("invalid priority '%s': %w", priorityStr, err)
    }
    if priority <= 0 {
        return 0, fmt.Errorf("priority must be positive '%s'", priorityStr)
    }

    return priority, nil
}

func parseWeight(weightStr string) (float64, error) {
    weight, err := strconv.ParseFloat(weightStr, 64)
    if err != nil {
        return 0, fmt.Errorf("invalid weight '%s': %w", weightStr, err)
    }
    if weight <= 0 {
        return 0, fmt.Errorf("weight must be positive '%s'", weightStr)
    }

    return weight, nil
}

func createPool(ctx context.Context, key *keys.PrivateKey, cfg *Options) (*pool.Pool, error) {
    var prm pool.InitParameters
    prm.SetKey(&key.PrivateKey)
    prm.SetNodeDialTimeout(time.Duration(cfg.FrostfsConnectionTimeout))
    prm.SetHealthcheckTimeout(time.Duration(cfg.FrostfsRequestTimeout))
    prm.SetClientRebalanceInterval(time.Duration(cfg.FrostfsRebalanceInterval))
    prm.SetSessionExpirationDuration(cfg.FrostfsSessionExpiration)

    nodes, err := getNodePoolParams(cfg.FrostfsEndpoint)
    if err != nil {
        return nil, err
    }
    for _, node := range nodes {
        prm.AddNode(node)
    }

    p, err := pool.NewPool(prm)
    if err != nil {
        return nil, fmt.Errorf("create pool: %w", err)
    }

    if err = p.Dial(ctx); err != nil {
        return nil, fmt.Errorf("dial pool: %w", err)
    }

    return p, nil
}

func getNodePoolParams(endpointParam string) ([]pool.NodeParam, error) {
    endpointInfos, err := parseEndpoints(endpointParam)
    if err != nil {
        return nil, fmt.Errorf("parse endpoints params: %w", err)
    }

    res := make([]pool.NodeParam, len(endpointInfos))
    for i, info := range endpointInfos {
        res[i] = pool.NewNodeParam(info.Priority, info.Address, info.Weight)
    }

    return res, nil
}

func createNNSResolver(cfg *Options) (*resolver.NNS, error) {
    if cfg.RPCEndpoint == "" {
        return nil, nil
    }

    var nns resolver.NNS
    if err := nns.Dial(cfg.RPCEndpoint); err != nil {
        return nil, fmt.Errorf("dial NNS resolver: %w", err)
    }

    return &nns, nil
}

func getAccount(cfg *Options) (*wallet.Account, error) {
    w, err := wallet.NewWalletFromFile(cfg.Wallet)
    if err != nil {
        return nil, err
    }

    addr := w.GetChangeAddress()
    if cfg.Address != "" {
        addr, err = flags.ParseAddress(cfg.Address)
        if err != nil {
            return nil, fmt.Errorf("invalid address")
        }
    }
    acc := w.GetAccount(addr)
    err = acc.Decrypt(cfg.Password, w.Scrypt)
    if err != nil {
        return nil, err
    }

    return acc, nil
}

func newAddress(cnrID cid.ID, objID oid.ID) oid.Address {
    var addr oid.Address
    addr.SetContainer(cnrID)
    addr.SetObject(objID)
    return addr
}

func formObject(own *user.ID, cnrID cid.ID, name string, header map[string]string) *object.Object {
    attributes := make([]object.Attribute, 0, 1+len(header))
    filename := object.NewAttribute()
    filename.SetKey(object.AttributeFileName)
    filename.SetValue(name)

    attributes = append(attributes, *filename)

    for key, val := range header {
        attr := object.NewAttribute()
        attr.SetKey(key)
        attr.SetValue(val)
        attributes = append(attributes, *attr)
    }

    obj := object.New()
    obj.SetOwnerID(*own)
    obj.SetContainerID(cnrID)
    obj.SetAttributes(attributes...)

    return obj
}

func newDir(cnrID cid.ID, cnr container.Container, defaultZone string) *fs.Dir {
    remote := cnrID.EncodeToString()
    timestamp := container.CreatedAt(cnr)

    if domain := container.ReadDomain(cnr); domain.Name() != "" {
        if defaultZone != domain.Zone() {
            remote = domain.Name() + "." + domain.Zone()
        } else {
            remote = domain.Name()
        }
    }

    dir := fs.NewDir(remote, timestamp)
    dir.SetID(cnrID.String())
    return dir
}

func getContainerNameAndZone(containerStr, defaultZone string) (cnrName string, cnrZone string) {
    defer func() {
        if len(cnrZone) == 0 {
            cnrZone = defaultZone
        }
    }()
    if idx := strings.Index(containerStr, "."); idx >= 0 {
        return containerStr[:idx], containerStr[idx+1:]
    }
    return containerStr, defaultZone
}

@ -1,205 +0,0 @@
package frostfs

import (
    "strconv"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestGetZoneAndContainerNames(t *testing.T) {
    for i, tc := range []struct {
        cnrStr       string
        defZone      string
        expectedName string
        expectedZone string
    }{
        {
            cnrStr:       "",
            defZone:      "def_zone",
            expectedName: "",
            expectedZone: "def_zone",
        },
        {
            cnrStr:       "",
            defZone:      "def_zone",
            expectedName: "",
            expectedZone: "def_zone",
        },
        {
            cnrStr:       "cnr_name",
            defZone:      "def_zone",
            expectedName: "cnr_name",
            expectedZone: "def_zone",
        },
        {
            cnrStr:       "cnr_name.",
            defZone:      "def_zone",
            expectedName: "cnr_name",
            expectedZone: "def_zone",
        },
        {
            cnrStr:       ".cnr_zone",
            defZone:      "def_zone",
            expectedName: "",
            expectedZone: "cnr_zone",
        }, {
            cnrStr:       ".cnr_zone",
            defZone:      "def_zone",
            expectedName: "",
            expectedZone: "cnr_zone",
        },
    } {
        t.Run(strconv.Itoa(i), func(t *testing.T) {
            actualName, actualZone := getContainerNameAndZone(tc.cnrStr, tc.defZone)
            require.Equal(t, tc.expectedZone, actualZone)
            require.Equal(t, tc.expectedName, actualName)
        })
    }
}

func TestParseContainerCreationPolicy(t *testing.T) {
    for i, tc := range []struct {
        ACLString     string
        ExpectedError bool
    }{
        {
            ACLString:     "",
            ExpectedError: true,
        },
        {
            ACLString:     "public-ready",
            ExpectedError: true,
        },
        {
            ACLString:     "public-read",
            ExpectedError: false,
        },
        {
            ACLString:     "public-read-write",
            ExpectedError: false,
        },
        {
            ACLString:     "private",
            ExpectedError: false,
        },
    } {
        t.Run(strconv.Itoa(i), func(t *testing.T) {
            rules, err := parseContainerCreationPolicyString(tc.ACLString)
            if tc.ExpectedError {
                require.Error(t, err)
                require.Nil(t, rules)
            } else {
                require.NoError(t, err)
                require.NotNil(t, rules)
            }
        })
    }
}

func TestParseEndpoints(t *testing.T) {
    for i, tc := range []struct {
        EndpointsParam string
        ExpectedError  bool
        ExpectedResult []endpointInfo
    }{
        {
            EndpointsParam: "s01.frostfs.devenv:8080",
            ExpectedResult: []endpointInfo{{
                Address:  "s01.frostfs.devenv:8080",
                Priority: 1,
                Weight:   1,
            }},
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,2",
            ExpectedResult: []endpointInfo{{
                Address:  "s01.frostfs.devenv:8080",
                Priority: 2,
                Weight:   1,
            }},
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,2,3",
            ExpectedResult: []endpointInfo{{
                Address:  "s01.frostfs.devenv:8080",
                Priority: 2,
                Weight:   3,
            }},
        },
        {
            EndpointsParam: " s01.frostfs.devenv:8080 s02.frostfs.devenv:8080 ",
            ExpectedResult: []endpointInfo{
                {
                    Address:  "s01.frostfs.devenv:8080",
                    Priority: 1,
                    Weight:   1,
                },
                {
                    Address:  "s02.frostfs.devenv:8080",
                    Priority: 1,
                    Weight:   1,
                },
            },
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9",
            ExpectedResult: []endpointInfo{
                {
                    Address:  "s01.frostfs.devenv:8080",
                    Priority: 1,
                    Weight:   1,
                },
                {
                    Address:  "s02.frostfs.devenv:8080",
                    Priority: 2,
                    Weight:   1,
                },
                {
                    Address:  "s03.frostfs.devenv:8080",
                    Priority: 2,
                    Weight:   9,
                },
            },
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,-1,1",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,,",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,sd,sd",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,1,0",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080",
            ExpectedError:  true,
        },
        {
            EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080,1",
            ExpectedError:  true,
        },
    } {
        t.Run(strconv.Itoa(i), func(t *testing.T) {
            res, err := parseEndpoints(tc.EndpointsParam)
            if tc.ExpectedError {
                require.Error(t, err)
                return
            }

            require.NoError(t, err)
            require.Equal(t, tc.ExpectedResult, res)
        })
    }
}

@ -15,7 +15,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/jlaffaye/ftp"
|
"github.com/rclone/ftp"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
|
@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "concurrency",
|
Name: "concurrency",
|
||||||
Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
|
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
|
||||||
|
|
||||||
Note that setting this is very likely to cause deadlocks so it should
|
Note that setting this is very likely to cause deadlocks so it should
|
||||||
be used with care.
|
be used with care.
|
||||||
|
@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
|
||||||
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
|
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
|
||||||
--check-first| or |--checkers 1 --transfers 1|.
|
--check-first| or |--checkers 1 --transfers 1|.
|
||||||
|
|
||||||
`, "|", "`"),
|
`, "|", "`", -1),
|
||||||
Default: 0,
|
Default: 0,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
|
@ -249,6 +249,7 @@ type Fs struct {
|
||||||
pool []*ftp.ServerConn
|
pool []*ftp.ServerConn
|
||||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||||
tokens *pacer.TokenDispenser
|
tokens *pacer.TokenDispenser
|
||||||
|
tlsConf *tls.Config
|
||||||
pacer *fs.Pacer // pacer for FTP connections
|
pacer *fs.Pacer // pacer for FTP connections
|
||||||
fGetTime bool // true if the ftp library accepts GetTime
|
fGetTime bool // true if the ftp library accepts GetTime
|
||||||
fSetTime bool // true if the ftp library accepts SetTime
|
fSetTime bool // true if the ftp library accepts SetTime
|
||||||
|
@ -361,36 +362,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||||
return fserrors.ShouldRetry(err), err
|
return fserrors.ShouldRetry(err), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get a TLS config with a unique session cache.
|
|
||||||
//
|
|
||||||
// We can't share session caches between connections.
|
|
||||||
//
|
|
||||||
// See: https://github.com/rclone/rclone/issues/7234
|
|
||||||
func (f *Fs) tlsConfig() *tls.Config {
|
|
||||||
var tlsConfig *tls.Config
|
|
||||||
if f.opt.TLS || f.opt.ExplicitTLS {
|
|
||||||
tlsConfig = &tls.Config{
|
|
||||||
ServerName: f.opt.Host,
|
|
||||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
|
||||||
}
|
|
||||||
if f.opt.TLSCacheSize > 0 {
|
|
||||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
|
|
||||||
}
|
|
||||||
if f.opt.DisableTLS13 {
|
|
||||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tlsConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a new connection to the FTP server.
|
// Open a new connection to the FTP server.
|
||||||
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
fs.Debugf(f, "Connecting to FTP server")
|
fs.Debugf(f, "Connecting to FTP server")
|
||||||
|
|
||||||
// tls.Config for this connection only. Will be used for data
|
|
||||||
// and control connections.
|
|
||||||
tlsConfig := f.tlsConfig()
|
|
||||||
|
|
||||||
// Make ftp library dial with fshttp dialer optionally using TLS
|
// Make ftp library dial with fshttp dialer optionally using TLS
|
||||||
initialConnection := true
|
initialConnection := true
|
||||||
dial := func(network, address string) (conn net.Conn, err error) {
|
dial := func(network, address string) (conn net.Conn, err error) {
|
||||||
|
@ -408,7 +383,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Connect using cleartext only for non TLS
|
// Connect using cleartext only for non TLS
|
||||||
if tlsConfig == nil {
|
if f.tlsConf == nil {
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}
|
}
|
||||||
// Initial connection only needs to be cleartext for explicit TLS
|
// Initial connection only needs to be cleartext for explicit TLS
|
||||||
|
@ -417,7 +392,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}
|
}
|
||||||
// Upgrade connection to TLS
|
// Upgrade connection to TLS
|
||||||
tlsConn := tls.Client(conn, tlsConfig)
|
tlsConn := tls.Client(conn, f.tlsConf)
|
||||||
// Do the initial handshake - tls.Client doesn't do it for us
|
// Do the initial handshake - tls.Client doesn't do it for us
|
||||||
// If we do this then connections to proftpd/pureftpd lock up
|
// If we do this then connections to proftpd/pureftpd lock up
|
||||||
// See: https://github.com/rclone/rclone/issues/6426
|
// See: https://github.com/rclone/rclone/issues/6426
|
||||||
|
@ -439,9 +414,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
if f.opt.TLS {
|
if f.opt.TLS {
|
||||||
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
||||||
// as a trigger for sending PSBZ and PROT options to server.
|
// as a trigger for sending PSBZ and PROT options to server.
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
|
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
|
||||||
} else if f.opt.ExplicitTLS {
|
} else if f.opt.ExplicitTLS {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
|
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
||||||
}
|
}
|
||||||
if f.opt.DisableEPSV {
|
if f.opt.DisableEPSV {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
||||||
|
@ -596,6 +571,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||||
if opt.TLS && opt.ExplicitTLS {
|
if opt.TLS && opt.ExplicitTLS {
|
||||||
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
||||||
}
|
}
|
||||||
|
var tlsConfig *tls.Config
|
||||||
|
if opt.TLS || opt.ExplicitTLS {
|
||||||
|
tlsConfig = &tls.Config{
|
||||||
|
ServerName: opt.Host,
|
||||||
|
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||||
|
}
|
||||||
|
if opt.TLSCacheSize > 0 {
|
||||||
|
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
|
||||||
|
}
|
||||||
|
if opt.DisableTLS13 {
|
||||||
|
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||||
|
}
|
||||||
|
}
|
||||||
u := protocol + path.Join(dialAddr+"/", root)
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
|
@ -608,6 +596,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||||
pass: pass,
|
pass: pass,
|
||||||
dialAddr: dialAddr,
|
dialAddr: dialAddr,
|
||||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||||
|
tlsConf: tlsConfig,
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
|
@ -970,8 +959,6 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||||
f.putFtpConnection(&c, err)
|
f.putFtpConnection(&c, err)
|
||||||
if errX := textprotoError(err); errX != nil {
|
if errX := textprotoError(err); errX != nil {
|
||||||
switch errX.Code {
|
switch errX.Code {
|
||||||
case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
|
|
||||||
err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
|
|
||||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||||
err = nil
|
err = nil
|
||||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||||
|
|
|
@ -1,311 +0,0 @@
|
||||||
// Package api has type definitions for gofile
|
|
||||||
//
|
|
||||||
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
|
|
||||||
package api
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// 2017-05-03T07:26:10-07:00
|
|
||||||
timeFormat = `"` + time.RFC3339 + `"`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Time represents date and time information for the
|
|
||||||
// gofile API, by using RFC3339
|
|
||||||
type Time time.Time
|
|
||||||
|
|
||||||
// MarshalJSON turns a Time into JSON (in UTC)
|
|
||||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
|
||||||
timeString := (*time.Time)(t).Format(timeFormat)
|
|
||||||
return []byte(timeString), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON turns JSON into a Time
|
|
||||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
|
||||||
newT, err := time.Parse(timeFormat, string(data))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*t = Time(newT)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error is returned from gofile when things go wrong
|
|
||||||
type Error struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a string for the error and satisfies the error interface
|
|
||||||
func (e Error) Error() string {
|
|
||||||
out := fmt.Sprintf("Error %q", e.Status)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsError returns true if there is an error
|
|
||||||
func (e Error) IsError() bool {
|
|
||||||
return e.Status != "ok"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns err if not nil, or e if IsError or nil
|
|
||||||
func (e Error) Err(err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if e.IsError() {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check Error satisfies the error interface
|
|
||||||
var _ error = (*Error)(nil)
|
|
||||||
|
|
||||||
// Types of things in Item
|
|
||||||
const (
|
|
||||||
ItemTypeFolder = "folder"
|
|
||||||
ItemTypeFile = "file"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Item describes a folder or a file as returned by /contents
|
|
||||||
type Item struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
ParentFolder string `json:"parentFolder"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Code string `json:"code"`
|
|
||||||
CreateTime int64 `json:"createTime"`
|
|
||||||
ModTime int64 `json:"modTime"`
|
|
||||||
Link string `json:"link"`
|
|
||||||
MD5 string `json:"md5"`
|
|
||||||
MimeType string `json:"mimetype"`
|
|
||||||
ChildrenCount int `json:"childrenCount"`
|
|
||||||
DirectLinks map[string]*DirectLink `json:"directLinks"`
|
|
||||||
//Public bool `json:"public"`
|
|
||||||
//ServerSelected string `json:"serverSelected"`
|
|
||||||
//Thumbnail string `json:"thumbnail"`
|
|
||||||
//DownloadCount int `json:"downloadCount"`
|
|
||||||
//TotalDownloadCount int64 `json:"totalDownloadCount"`
|
|
||||||
//TotalSize int64 `json:"totalSize"`
|
|
||||||
//ChildrenIDs []string `json:"childrenIds"`
|
|
||||||
Children map[string]*Item `json:"children"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToNativeTime converts a go time to a native time
|
|
||||||
func ToNativeTime(t time.Time) int64 {
|
|
||||||
return t.Unix()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromNativeTime converts native time to a go time
|
|
||||||
func FromNativeTime(t int64) time.Time {
|
|
||||||
return time.Unix(t, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectLink describes a direct link to a file so it can be
|
|
||||||
// downloaded by third parties.
|
|
||||||
type DirectLink struct {
|
|
||||||
ExpireTime int64 `json:"expireTime"`
|
|
||||||
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
|
|
||||||
DomainsAllowed []any `json:"domainsAllowed"`
|
|
||||||
Auth []any `json:"auth"`
|
|
||||||
IsReqLink bool `json:"isReqLink"`
|
|
||||||
DirectLink string `json:"directLink"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contents is returned from the /contents call
|
|
||||||
type Contents struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
Item
|
|
||||||
} `json:"data"`
|
|
||||||
Metadata Metadata `json:"metadata"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata is returned when paging is in use
|
|
||||||
type Metadata struct {
|
|
||||||
TotalCount int `json:"totalCount"`
|
|
||||||
TotalPages int `json:"totalPages"`
|
|
||||||
Page int `json:"page"`
|
|
||||||
PageSize int `json:"pageSize"`
|
|
||||||
HasNextPage bool `json:"hasNextPage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccountsGetID is the result of /accounts/getid
|
|
||||||
type AccountsGetID struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stats of storage and traffic
|
|
||||||
type Stats struct {
|
|
||||||
FolderCount int64 `json:"folderCount"`
|
|
||||||
FileCount int64 `json:"fileCount"`
|
|
||||||
Storage int64 `json:"storage"`
|
|
||||||
TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
|
|
||||||
TrafficReqDownloaded int64 `json:"trafficReqDownloaded"`
|
|
||||||
TrafficWebDownloaded int64 `json:"trafficWebDownloaded"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccountsGet is the result of /accounts/{id}
|
|
||||||
type AccountsGet struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Email string `json:"email"`
|
|
||||||
Tier string `json:"tier"`
|
|
||||||
PremiumType string `json:"premiumType"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
RootFolder string `json:"rootFolder"`
|
|
||||||
SubscriptionProvider string `json:"subscriptionProvider"`
|
|
||||||
SubscriptionEndDate int `json:"subscriptionEndDate"`
|
|
||||||
SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"`
|
|
||||||
SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"`
|
|
||||||
StatsCurrent Stats `json:"statsCurrent"`
|
|
||||||
// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateFolderRequest is the input to /contents/createFolder
|
|
||||||
type CreateFolderRequest struct {
|
|
||||||
ParentFolderID string `json:"parentFolderId"`
|
|
||||||
FolderName string `json:"folderName"`
|
|
||||||
ModTime int64 `json:"modTime,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateFolderResponse is the output from /contents/createFolder
|
|
||||||
type CreateFolderResponse struct {
|
|
||||||
Error
|
|
||||||
Data Item `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteRequest is the input to DELETE /contents
|
|
||||||
type DeleteRequest struct {
|
|
||||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteResponse is the input to DELETE /contents
|
|
||||||
type DeleteResponse struct {
|
|
||||||
Error
|
|
||||||
Data map[string]Error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Server is an upload server
|
|
||||||
type Server struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Zone string `json:"zone"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation of the Server
|
|
||||||
func (s *Server) String() string {
|
|
||||||
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the root URL for the server
|
|
||||||
func (s *Server) Root() string {
|
|
||||||
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns the upload URL for the server
|
|
||||||
func (s *Server) URL() string {
|
|
||||||
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServersResponse is the output from /servers
|
|
||||||
type ServersResponse struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
Servers []Server `json:"servers"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadResponse is returned by POST /contents/uploadfile
|
|
||||||
type UploadResponse struct {
|
|
||||||
Error
|
|
||||||
Data Item `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectLinksRequest specifies the parameters for the direct link
|
|
||||||
type DirectLinksRequest struct {
|
|
||||||
ExpireTime int64 `json:"expireTime,omitempty"`
|
|
||||||
SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
|
|
||||||
DomainsAllowed []any `json:"domainsAllowed,omitempty"`
|
|
||||||
Auth []any `json:"auth,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirectLinksResult is returned from POST /contents/{id}/directlinks
|
|
||||||
type DirectLinksResult struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
ExpireTime int64 `json:"expireTime"`
|
|
||||||
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
|
|
||||||
DomainsAllowed []any `json:"domainsAllowed"`
|
|
||||||
Auth []any `json:"auth"`
|
|
||||||
IsReqLink bool `json:"isReqLink"`
|
|
||||||
ID string `json:"id"`
|
|
||||||
DirectLink string `json:"directLink"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
|
|
||||||
//
|
|
||||||
// The Value of the attribute to define :
|
|
||||||
// For Attribute "name" : The name of the content (file or folder)
|
|
||||||
// For Attribute "description" : The description displayed on the download page (folder only)
|
|
||||||
// For Attribute "tags" : A comma-separated list of tags (folder only)
|
|
||||||
// For Attribute "public" : either true or false (folder only)
|
|
||||||
// For Attribute "expiry" : A unix timestamp of the expiration date (folder only)
|
|
||||||
// For Attribute "password" : The password to set (folder only)
|
|
||||||
type UpdateItemRequest struct {
|
|
||||||
Attribute string `json:"attribute"`
|
|
||||||
Value any `json:"attributeValue"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateItemResponse is returned by PUT /contents/{id}/update
|
|
||||||
type UpdateItemResponse struct {
|
|
||||||
Error
|
|
||||||
Data Item `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// MoveRequest is the input to /contents/move
|
|
||||||
type MoveRequest struct {
|
|
||||||
FolderID string `json:"folderId"`
|
|
||||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
|
||||||
}
|
|
||||||
|
|
||||||
// MoveResponse is returned by POST /contents/move
|
|
||||||
type MoveResponse struct {
|
|
||||||
Error
|
|
||||||
Data map[string]struct {
|
|
||||||
Error
|
|
||||||
Item `json:"data"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyRequest is the input to /contents/copy
|
|
||||||
type CopyRequest struct {
|
|
||||||
FolderID string `json:"folderId"`
|
|
||||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyResponse is returned by POST /contents/copy
|
|
||||||
type CopyResponse struct {
|
|
||||||
Error
|
|
||||||
Data map[string]struct {
|
|
||||||
Error
|
|
||||||
Item `json:"data"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadServerStatus is returned when fetching the root of an upload server
|
|
||||||
type UploadServerStatus struct {
|
|
||||||
Error
|
|
||||||
Data struct {
|
|
||||||
Server string `json:"server"`
|
|
||||||
Test string `json:"test"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
File diff suppressed because it is too large
@ -1,17 +0,0 @@
// Test Gofile filesystem interface
package gofile_test

import (
    "testing"

    "github.com/rclone/rclone/backend/gofile"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestGoFile:",
        NilObject:  (*gofile.Object)(nil),
    })
}

@ -697,7 +697,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||||
// is this a directory marker?
|
// is this a directory marker?
|
||||||
if isDirectory {
|
if isDirectory {
|
||||||
// Don't insert the root directory
|
// Don't insert the root directory
|
||||||
if remote == f.opt.Enc.ToStandardPath(directory) {
|
if remote == directory {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// process directory markers as directories
|
// process directory markers as directories
|
||||||
|
@ -1310,11 +1310,10 @@ func (o *Object) Storable() bool {
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
url := o.url
|
|
||||||
if o.fs.opt.UserProject != "" {
|
if o.fs.opt.UserProject != "" {
|
||||||
url += "&userProject=" + o.fs.opt.UserProject
|
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
|
||||||
}
|
}
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,7 +56,8 @@ type MediaItem struct {
|
||||||
CreationTime time.Time `json:"creationTime"`
|
CreationTime time.Time `json:"creationTime"`
|
||||||
Width string `json:"width"`
|
Width string `json:"width"`
|
||||||
Height string `json:"height"`
|
Height string `json:"height"`
|
||||||
Photo struct{} `json:"photo"`
|
Photo struct {
|
||||||
|
} `json:"photo"`
|
||||||
} `json:"mediaMetadata"`
|
} `json:"mediaMetadata"`
|
||||||
Filename string `json:"filename"`
|
Filename string `json:"filename"`
|
||||||
}
|
}
|
||||||
|
@ -67,7 +68,7 @@ type MediaItems struct {
|
||||||
NextPageToken string `json:"nextPageToken"`
|
NextPageToken string `json:"nextPageToken"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Content categories
|
//Content categories
|
||||||
// NONE Default content category. This category is ignored when any other category is used in the filter.
|
// NONE Default content category. This category is ignored when any other category is used in the filter.
|
||||||
// LANDSCAPES Media items containing landscapes.
|
// LANDSCAPES Media items containing landscapes.
|
||||||
// RECEIPTS Media items containing receipts.
|
// RECEIPTS Media items containing receipts.
|
||||||
|
@ -186,5 +187,5 @@ type BatchCreateResponse struct {
|
||||||
|
|
||||||
// BatchRemoveItems is for removing items from an album
|
// BatchRemoveItems is for removing items from an album
|
||||||
type BatchRemoveItems struct {
|
type BatchRemoveItems struct {
|
||||||
MediaItemIDs []string `json:"mediaItemIds"`
|
MediaItemIds []string `json:"mediaItemIds"`
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,7 +29,6 @@ import (
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/log"
|
"github.com/rclone/rclone/fs/log"
|
||||||
"github.com/rclone/rclone/lib/batcher"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
|
@ -72,14 +71,6 @@ var (
|
||||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||||
RedirectURL: oauthutil.RedirectURL,
|
RedirectURL: oauthutil.RedirectURL,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure the batcher
|
|
||||||
defaultBatcherOptions = batcher.Options{
|
|
||||||
MaxBatchSize: 50,
|
|
||||||
DefaultTimeoutSync: 1000 * time.Millisecond,
|
|
||||||
DefaultTimeoutAsync: 10 * time.Second,
|
|
||||||
DefaultBatchSizeAsync: 50,
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
|
@ -120,7 +111,7 @@ will count towards storage in your Google Account.`)
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||||
},
|
},
|
||||||
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "read_only",
|
Name: "read_only",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Set to make the Google Photos backend read only.
|
Help: `Set to make the Google Photos backend read only.
|
||||||
|
@ -167,7 +158,7 @@ listings and won't be transferred.`,
|
||||||
Default: (encoder.Base |
|
Default: (encoder.Base |
|
||||||
encoder.EncodeCrLf |
|
encoder.EncodeCrLf |
|
||||||
encoder.EncodeInvalidUtf8),
|
encoder.EncodeInvalidUtf8),
|
||||||
}}...), defaultBatcherOptions.FsOptions("")...),
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,9 +169,6 @@ type Options struct {
|
||||||
StartYear int `config:"start_year"`
|
StartYear int `config:"start_year"`
|
||||||
IncludeArchived bool `config:"include_archived"`
|
IncludeArchived bool `config:"include_archived"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
BatchMode string `config:"batch_mode"`
|
|
||||||
BatchSize int `config:"batch_size"`
|
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
|
@ -199,7 +187,6 @@ type Fs struct {
|
||||||
uploadedMu sync.Mutex // to protect the below
|
uploadedMu sync.Mutex // to protect the below
|
||||||
uploaded dirtree.DirTree // record of uploaded items
|
uploaded dirtree.DirTree // record of uploaded items
|
||||||
createMu sync.Mutex // held when creating albums to prevent dupes
|
createMu sync.Mutex // held when creating albums to prevent dupes
|
||||||
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a storage object
|
// Object describes a storage object
|
||||||
|
@ -280,7 +267,7 @@ func errorHandler(resp *http.Response) error {
|
||||||
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
|
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
|
||||||
body = []byte("Image not found or broken")
|
body = []byte("Image not found or broken")
|
||||||
}
|
}
|
||||||
e := api.Error{
|
var e = api.Error{
|
||||||
Details: api.ErrorDetails{
|
Details: api.ErrorDetails{
|
||||||
Code: resp.StatusCode,
|
Code: resp.StatusCode,
|
||||||
Message: string(body),
|
Message: string(body),
|
||||||
|
@ -325,14 +312,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||||
albums: map[bool]*albums{},
|
albums: map[bool]*albums{},
|
||||||
uploaded: dirtree.New(),
|
uploaded: dirtree.New(),
|
||||||
}
|
}
|
||||||
batcherOptions := defaultBatcherOptions
|
|
||||||
batcherOptions.Mode = f.opt.BatchMode
|
|
||||||
batcherOptions.Size = f.opt.BatchSize
|
|
||||||
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
|
|
||||||
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
|
@ -620,7 +599,9 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
entries = append(entries, entry)
|
if entry != nil {
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -700,7 +681,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
|
||||||
Path: "/albums",
|
Path: "/albums",
|
||||||
Parameters: url.Values{},
|
Parameters: url.Values{},
|
||||||
}
|
}
|
||||||
request := api.CreateAlbum{
|
var request = api.CreateAlbum{
|
||||||
Album: &api.Album{
|
Album: &api.Album{
|
||||||
Title: albumTitle,
|
Title: albumTitle,
|
||||||
},
|
},
|
||||||
|
@ -800,13 +781,6 @@ func (f *Fs) Hashes() hash.Set {
|
||||||
return hash.Set(hash.None)
|
return hash.Set(hash.None)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any
|
|
||||||
// cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.batcher.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Fs returns the parent Fs
|
// Fs returns the parent Fs
|
||||||
|
@ -987,82 +961,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||||
return resp.Body, err
|
return resp.Body, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// input to the batcher
|
|
||||||
type uploadedItem struct {
|
|
||||||
AlbumID string // desired album
|
|
||||||
UploadToken string // upload ID
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit a batch of items to albumID returning the errors in errors
|
|
||||||
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
|
|
||||||
// Create the media item from an UploadToken, optionally adding to an album
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/mediaItems:batchCreate",
|
|
||||||
}
|
|
||||||
request := api.BatchCreateRequest{
|
|
||||||
AlbumID: albumID,
|
|
||||||
}
|
|
||||||
itemsInBatch := 0
|
|
||||||
for i := range items {
|
|
||||||
if items[i].AlbumID == albumID {
|
|
||||||
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
|
|
||||||
SimpleMediaItem: api.SimpleMediaItem{
|
|
||||||
UploadToken: items[i].UploadToken,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
itemsInBatch++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var result api.BatchCreateResponse
|
|
||||||
var resp *http.Response
|
|
||||||
var err error
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to create media item: %w", err)
|
|
||||||
}
|
|
||||||
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
|
|
||||||
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
|
|
||||||
}
|
|
||||||
j := 0
|
|
||||||
for i := range items {
|
|
||||||
if items[i].AlbumID == albumID {
|
|
||||||
if err == nil {
|
|
||||||
media := &result.NewMediaItemResults[j]
|
|
||||||
if media.Status.Code != 0 {
|
|
||||||
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
|
|
||||||
} else {
|
|
||||||
results[i] = &media.MediaItem
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
errors[i] = err
|
|
||||||
}
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Called by the batcher to commit a batch
|
|
||||||
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
|
|
||||||
// Discover all the AlbumIDs as we have to upload these separately
|
|
||||||
//
|
|
||||||
// Should maybe have one batcher per AlbumID
|
|
||||||
albumIDs := map[string]struct{}{}
|
|
||||||
for i := range items {
|
|
||||||
albumIDs[items[i].AlbumID] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// batch the albums
|
|
||||||
for albumID := range albumIDs {
|
|
||||||
// errors returned in errors
|
|
||||||
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
|
@ -1123,29 +1021,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||||
return errors.New("empty upload token")
|
return errors.New("empty upload token")
|
||||||
}
|
}
|
||||||
|
|
||||||
uploaded := uploadedItem{
|
// Create the media item from an UploadToken, optionally adding to an album
|
||||||
AlbumID: albumID,
|
opts = rest.Opts{
|
||||||
UploadToken: uploadToken,
|
Method: "POST",
|
||||||
|
Path: "/mediaItems:batchCreate",
|
||||||
}
|
}
|
||||||
|
var request = api.BatchCreateRequest{
|
||||||
// Save the upload into an album
|
AlbumID: albumID,
|
||||||
var info *api.MediaItem
|
NewMediaItems: []api.NewMediaItem{
|
||||||
if o.fs.batcher.Batching() {
|
{
|
||||||
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
|
SimpleMediaItem: api.SimpleMediaItem{
|
||||||
} else {
|
UploadToken: uploadToken,
|
||||||
errors := make([]error, 1)
|
},
|
||||||
results := make([]*api.MediaItem, 1)
|
},
|
||||||
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
|
},
|
||||||
if err != nil {
|
|
||||||
err = errors[0]
|
|
||||||
info = results[0]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
var result api.BatchCreateResponse
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to commit batch: %w", err)
|
return fmt.Errorf("failed to create media item: %w", err)
|
||||||
}
|
}
|
||||||
|
if len(result.NewMediaItemResults) != 1 {
|
||||||
o.setMetaData(info)
|
return errors.New("bad response to BatchCreate wrong number of items")
|
||||||
|
}
|
||||||
|
mediaItemResult := result.NewMediaItemResults[0]
|
||||||
|
if mediaItemResult.Status.Code != 0 {
|
||||||
|
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
|
||||||
|
}
|
||||||
|
o.setMetaData(&mediaItemResult.MediaItem)
|
||||||
|
|
||||||
// Add upload to internal storage
|
// Add upload to internal storage
|
||||||
if pattern.isUpload {
|
if pattern.isUpload {
|
||||||
|
@ -1172,8 +1078,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
|
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
|
||||||
NoResponse: true,
|
NoResponse: true,
|
||||||
}
|
}
|
||||||
request := api.BatchRemoveItems{
|
var request = api.BatchRemoveItems{
|
||||||
MediaItemIDs: []string{o.id},
|
MediaItemIds: []string{o.id},
|
||||||
}
|
}
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
|
|
@ -38,7 +38,7 @@ type dirPattern struct {
|
||||||
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
|
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// dirPatterns is a slice of all the directory patterns
|
// dirPatters is a slice of all the directory patterns
|
||||||
type dirPatterns []dirPattern
|
type dirPatterns []dirPattern
|
||||||
|
|
||||||
// patterns describes the layout of the google photos backend file system.
|
// patterns describes the layout of the google photos backend file system.
|
||||||
|
|
|
@ -80,14 +80,6 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
|
||||||
}
|
}
|
||||||
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
|
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
|
||||||
}
|
}
|
||||||
if f.db == nil {
|
|
||||||
if f.opt.MaxAge == 0 {
|
|
||||||
fs.Errorf(f, "db not found. (disabled with max_age = 0)")
|
|
||||||
} else {
|
|
||||||
fs.Errorf(f, "db not found.")
|
|
||||||
}
|
|
||||||
return kv.ErrInactive
|
|
||||||
}
|
|
||||||
op := &kvDump{
|
op := &kvDump{
|
||||||
full: full,
|
full: full,
|
||||||
root: root,
|
root: root,
|
||||||
|
|
|
@ -114,13 +114,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||||
root: rpath,
|
root: rpath,
|
||||||
opt: opt,
|
opt: opt,
|
||||||
}
|
}
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
baseFeatures := baseFs.Features()
|
baseFeatures := baseFs.Features()
|
||||||
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
|
f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported
|
||||||
|
|
||||||
|
@ -164,21 +157,16 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
|
||||||
}
|
}
|
||||||
|
|
||||||
stubFeatures := &fs.Features{
|
stubFeatures := &fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
IsLocal: true,
|
IsLocal: true,
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ReadMetadata: true,
|
ReadMetadata: true,
|
||||||
WriteMetadata: true,
|
WriteMetadata: true,
|
||||||
UserMetadata: true,
|
UserMetadata: true,
|
||||||
ReadDirMetadata: true,
|
PartialUploads: true,
|
||||||
WriteDirMetadata: true,
|
|
||||||
WriteDirSetModTime: true,
|
|
||||||
UserDirMetadata: true,
|
|
||||||
DirModTimeUpdatesOnWrite: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}
|
}
|
||||||
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
|
||||||
|
|
||||||
|
@ -346,22 +334,6 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||||
return errors.New("MergeDirs not supported")
|
return errors.New("MergeDirs not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
// DirSetModTime sets the directory modtime for dir
|
|
||||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
||||||
if do := f.Fs.Features().DirSetModTime; do != nil {
|
|
||||||
return do(ctx, dir, modTime)
|
|
||||||
}
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirMetadata makes the root directory of the Fs object
|
|
||||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
|
||||||
if do := f.Fs.Features().MkdirMetadata; do != nil {
|
|
||||||
return do(ctx, dir, metadata)
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirCacheFlush resets the directory cache - used in testing
|
// DirCacheFlush resets the directory cache - used in testing
|
||||||
// as an optional interface
|
// as an optional interface
|
||||||
func (f *Fs) DirCacheFlush() {
|
func (f *Fs) DirCacheFlush() {
|
||||||
|
@ -439,9 +411,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any cached connections.
|
// Shutdown the backend, closing any background tasks and any cached connections.
|
||||||
func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||||
if f.db != nil && !f.db.IsStopped() {
|
err = f.db.Stop(false)
|
||||||
err = f.db.Stop(false)
|
|
||||||
}
|
|
||||||
if do := f.Fs.Features().Shutdown; do != nil {
|
if do := f.Fs.Features().Shutdown; do != nil {
|
||||||
if err2 := do(ctx); err2 != nil {
|
if err2 := do(ctx); err2 != nil {
|
||||||
err = err2
|
err = err2
|
||||||
|
@ -535,17 +505,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||||
return do.Metadata(ctx)
|
return do.Metadata(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMetadata sets metadata for an Object
|
|
||||||
//
|
|
||||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
||||||
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
||||||
do, ok := o.Object.(fs.SetMetadataer)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorNotImplemented
|
|
||||||
}
|
|
||||||
return do.SetMetadata(ctx, metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
|
@ -562,8 +521,6 @@ var (
|
||||||
_ fs.Abouter = (*Fs)(nil)
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.Wrapper = (*Fs)(nil)
|
_ fs.Wrapper = (*Fs)(nil)
|
||||||
_ fs.MergeDirser = (*Fs)(nil)
|
_ fs.MergeDirser = (*Fs)(nil)
|
||||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
|
||||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
|
||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||||
_ fs.PublicLinker = (*Fs)(nil)
|
_ fs.PublicLinker = (*Fs)(nil)
|
||||||
|
|
|
@ -60,11 +60,9 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
|
||||||
assert.NotNil(t, dst)
|
assert.NotNil(t, dst)
|
||||||
|
|
||||||
// check that hash was created
|
// check that hash was created
|
||||||
if f.opt.MaxAge > 0 {
|
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
||||||
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
|
assert.NoError(t, err)
|
||||||
assert.NoError(t, err)
|
assert.NotEmpty(t, hash)
|
||||||
assert.NotEmpty(t, hash)
|
|
||||||
}
|
|
||||||
//t.Logf("hash is %q", hash)
|
//t.Logf("hash is %q", hash)
|
||||||
_ = operations.Purge(ctx, f, dirName)
|
_ = operations.Purge(ctx, f, dirName)
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
|
||||||
NilObject: (*hasher.Object)(nil),
|
NilObject: (*hasher.Object)(nil),
|
||||||
UnimplementableFsMethods: []string{
|
UnimplementableFsMethods: []string{
|
||||||
"OpenWriterAt",
|
"OpenWriterAt",
|
||||||
"OpenChunkWriter",
|
|
||||||
},
|
},
|
||||||
UnimplementableObjectMethods: []string{},
|
UnimplementableObjectMethods: []string{},
|
||||||
}
|
}
|
||||||
|
@ -37,9 +36,4 @@ func TestIntegration(t *testing.T) {
|
||||||
opt.QuickTestOK = true
|
opt.QuickTestOK = true
|
||||||
}
|
}
|
||||||
fstests.Run(t, &opt)
|
fstests.Run(t, &opt)
|
||||||
// test again with MaxAge = 0
|
|
||||||
if *fstest.RemoteName == "" {
|
|
||||||
opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
|
|
||||||
fstests.Run(t, &opt)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -71,14 +71,7 @@ func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string,
|
||||||
f := o.f
|
f := o.f
|
||||||
if f.passHashes.Contains(hashType) {
|
if f.passHashes.Contains(hashType) {
|
||||||
fs.Debugf(o, "pass %s", hashType)
|
fs.Debugf(o, "pass %s", hashType)
|
||||||
hashVal, err = o.Object.Hash(ctx, hashType)
|
return o.Object.Hash(ctx, hashType)
|
||||||
if hashVal != "" {
|
|
||||||
return hashVal, err
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fs.Debugf(o, "error passing %s: %v", hashType, err)
|
|
||||||
}
|
|
||||||
fs.Debugf(o, "passed %s is blank -- trying other methods", hashType)
|
|
||||||
}
|
}
|
||||||
if !f.suppHashes.Contains(hashType) {
|
if !f.suppHashes.Contains(hashType) {
|
||||||
fs.Debugf(o, "unsupp %s", hashType)
|
fs.Debugf(o, "unsupp %s", hashType)
|
||||||
|
|
|
@@ -1,4 +1,5 @@
 //go:build !plan9
+// +build !plan9
 
 package hdfs
 

@@ -20,7 +21,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/pacer"
 )
 
 // Fs represents a HDFS server

@@ -31,15 +31,8 @@ type Fs struct {
 	opt    Options        // options for this backend
 	ci     *fs.ConfigInfo // global config
 	client *hdfs.Client
-	pacer  *fs.Pacer // pacer for API calls
 }
 
-const (
-	minSleep      = 20 * time.Millisecond
-	maxSleep      = 10 * time.Second
-	decayConstant = 2 // bigger for slower decay, exponential
-)
-
 // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
 func getKerberosClient() (*krb.Client, error) {
 	configPath := os.Getenv("KRB5_CONFIG")

@@ -92,7 +85,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	options := hdfs.ClientOptions{
-		Addresses:           opt.Namenode,
+		Addresses:           []string{opt.Namenode},
 		UseDatanodeHostname: false,
 	}
 

@@ -121,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:    *opt,
 		ci:     fs.GetConfig(ctx),
 		client: client,
-		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 
 	f.features = (&fs.Features{

@@ -149,7 +141,7 @@ func (f *Fs) Root() string {
 
 // String returns a description of the FS
 func (f *Fs) String() string {
-	return fmt.Sprintf("hdfs://%s/%s", f.opt.Namenode, f.root)
+	return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
 }
 
 // Features returns the optional features of this Fs

|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
size: x.Size(),
|
size: x.Size(),
|
||||||
modTime: x.ModTime(),
|
modTime: x.ModTime()})
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return entries, nil
|
return entries, nil
|
||||||
|
|
|
@@ -1,4 +1,5 @@
 //go:build !plan9
+// +build !plan9
 
 // Package hdfs provides an interface to the HDFS storage system.
 package hdfs

@@ -19,10 +20,9 @@ func init() {
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name:      "namenode",
-			Help:      "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.",
+			Help:      "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
 			Required:  true,
 			Sensitive: true,
-			Default:   fs.CommaSepList{},
 		}, {
 			Name: "username",
 			Help: "Hadoop user name.",

@@ -65,7 +65,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,
 
 // Options for this backend
 type Options struct {
-	Namenode               fs.CommaSepList `config:"namenode"`
+	Namenode               string          `config:"namenode"`
 	Username               string          `config:"username"`
 	ServicePrincipalName   string          `config:"service_principal_name"`
 	DataTransferProtection string          `config:"data_transfer_protection"`

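For reference, a minimal standalone sketch of connecting with github.com/colinmarc/hdfs/v2 using either a single namenode address or several comma-separated ones (the addresses, user name, and helper function are placeholders for illustration, not rclone code):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/colinmarc/hdfs/v2"
    )

    // connect opens a client against one or more namenode addresses and
    // lists / just to prove the connection works.
    func connect(addresses []string) error {
    	client, err := hdfs.NewClient(hdfs.ClientOptions{
    		Addresses:           addresses, // one or more "host:port" entries
    		User:                "hdfs",    // hypothetical Hadoop user name
    		UseDatanodeHostname: false,
    	})
    	if err != nil {
    		return err
    	}
    	defer client.Close()

    	infos, err := client.ReadDir("/")
    	if err != nil {
    		return err
    	}
    	fmt.Printf("%d entries under /\n", len(infos))
    	return nil
    }

    func main() {
    	// Single namenode, as the plain "host:port" string form would give.
    	_ = connect([]string{"namenode:8020"})

    	// Several namenodes, as a comma-separated list form would give.
    	_ = connect(strings.Split("namenode-1:8020,namenode-2:8020", ","))
    }
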
@@ -1,6 +1,7 @@
 // Test HDFS filesystem interface
 
 //go:build !plan9
+// +build !plan9
 
 package hdfs_test
 

@@ -2,6 +2,6 @@
 // about "no buildable Go source files "
 
 //go:build plan9
+// +build plan9
 
-// Package hdfs provides an interface to the HDFS storage system.
 package hdfs

@@ -1,15 +1,14 @@
 //go:build !plan9
+// +build !plan9
 
 package hdfs
 
 import (
 	"context"
-	"errors"
 	"io"
 	"path"
 	"time"
 
-	"github.com/colinmarc/hdfs/v2"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/readers"

@@ -107,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 // Update object
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	realpath := o.fs.realpath(o.remote)
+	realpath := o.fs.realpath(src.Remote())
 	dirname := path.Dir(realpath)
 	fs.Debugf(o.fs, "update [%s]", realpath)
 

@@ -142,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// If the datanodes have acknowledged all writes but not yet
-	// to the namenode, FileWriter.Close can return ErrReplicating
-	// (wrapped in an os.PathError). This indicates that all data
-	// has been written, but the lease is still open for the file.
-	//
-	// It is safe in this case to either ignore the error (and let
-	// the lease expire on its own) or to call Close multiple
-	// times until it completes without an error. The Java client,
-	// for context, always chooses to retry, with exponential
-	// backoff.
-	err = o.fs.pacer.Call(func() (bool, error) {
-		err := out.Close()
-		if err == nil {
-			return false, nil
-		}
-		return errors.Is(err, hdfs.ErrReplicating), err
-	})
+	err = out.Close()
 	if err != nil {
 		cleanup()
 		return err

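The removed comment above explains that FileWriter.Close may return hdfs.ErrReplicating while the namenode is still catching up, and that retrying Close is safe. A standalone sketch of that retry idea, using a plain capped-backoff loop instead of rclone's pacer (the attempt cap, delays, and helper package are illustrative assumptions):

    package hdfsutil // hypothetical helper package, not part of rclone

    import (
    	"errors"
    	"time"

    	"github.com/colinmarc/hdfs/v2"
    )

    // closeWithRetry keeps calling Close while it returns hdfs.ErrReplicating,
    // i.e. while the datanodes have acknowledged the writes but the namenode
    // has not yet. Attempt cap and delays are illustrative only.
    func closeWithRetry(out *hdfs.FileWriter) error {
    	delay := 20 * time.Millisecond
    	var err error
    	for attempt := 0; attempt < 10; attempt++ {
    		err = out.Close()
    		if err == nil || !errors.Is(err, hdfs.ErrReplicating) {
    			return err
    		}
    		time.Sleep(delay)
    		delay *= 2 // exponential backoff, as the removed comment describes
    	}
    	return err
    }
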
@@ -762,12 +762,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }
 
-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
-	f.tokenRenewer.Shutdown()
-	return nil
-}
-
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs.

@@ -1003,7 +997,6 @@ var (
 	_ fs.Copier     = (*Fs)(nil)
 	_ fs.Mover      = (*Fs)(nil)
 	_ fs.DirMover   = (*Fs)(nil)
-	_ fs.Shutdowner = (*Fs)(nil)
 	_ fs.Object     = (*Object)(nil)
 	_ fs.IDer       = (*Object)(nil)
 )

@@ -36,7 +36,6 @@ func init() {
 		Name:        "http",
 		Description: "HTTP",
 		NewFs:       NewFs,
-		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name: "url",
 			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",

@@ -89,10 +88,6 @@ that directory listings are much quicker, but rclone won't have the times or
 sizes of any files, and some files that don't exist may be in the listing.`,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name:    "no_escape",
-			Help:    "Do not escape URL metacharacters in path names.",
-			Default: false,
 		}},
 	}
 	fs.Register(fsi)

@@ -104,7 +99,6 @@ type Options struct {
 	NoSlash  bool            `config:"no_slash"`
 	NoHead   bool            `config:"no_head"`
 	Headers  fs.CommaSepList `config:"headers"`
-	NoEscape bool            `config:"no_escape"`
 }
 
 // Fs stores the interface to the remote HTTP files

@@ -216,42 +210,6 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
 	return createFileResult()
 }
 
-// Make the http connection with opt
-func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
-	if len(opt.Headers)%2 != 0 {
-		return false, errors.New("odd number of headers supplied")
-	}
-
-	if !strings.HasSuffix(opt.Endpoint, "/") {
-		opt.Endpoint += "/"
-	}
-
-	// Parse the endpoint and stick the root onto it
-	base, err := url.Parse(opt.Endpoint)
-	if err != nil {
-		return false, err
-	}
-	u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
-	if err != nil {
-		return false, err
-	}
-
-	client := fshttp.NewClient(ctx)
-
-	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
-	fs.Debugf(nil, "Root: %s", endpoint)
-	u, err = url.Parse(endpoint)
-	if err != nil {
-		return false, err
-	}
-
-	// Update f with the new parameters
-	f.httpClient = client
-	f.endpoint = u
-	f.endpointURL = u.String()
-	return isFile, nil
-}
-
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {

@@ -262,23 +220,47 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 
+	if len(opt.Headers)%2 != 0 {
+		return nil, errors.New("odd number of headers supplied")
+	}
+
+	if !strings.HasSuffix(opt.Endpoint, "/") {
+		opt.Endpoint += "/"
+	}
+
+	// Parse the endpoint and stick the root onto it
+	base, err := url.Parse(opt.Endpoint)
+	if err != nil {
+		return nil, err
+	}
+	u, err := rest.URLJoin(base, rest.URLPathEscape(root))
+	if err != nil {
+		return nil, err
+	}
+
+	client := fshttp.NewClient(ctx)
+
+	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
+	fs.Debugf(nil, "Root: %s", endpoint)
+	u, err = url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		ci:          ci,
+		httpClient:  client,
+		endpoint:    u,
+		endpointURL: u.String(),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
 
-	// Make the http connection
-	isFile, err := f.httpConnection(ctx, opt)
-	if err != nil {
-		return nil, err
-	}
-
 	if isFile {
 		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile

@@ -331,11 +313,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
-	if f.opt.NoEscape {
-		// Directly concatenate without escaping, no_escape behavior
-		return f.endpointURL + remote
-	}
-	// Default behavior
 	return f.endpointURL + rest.URLPathEscape(remote)
 }
 

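For illustration, the effect of path escaping in url() can be reproduced with the standard library; escapePath below is a hypothetical stand-in for rest.URLPathEscape, not its actual implementation, and only shows the general idea of percent-encoding each path segment while keeping the / separators:

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    // escapePath percent-encodes each slash-separated segment of a remote path.
    // Hypothetical stand-in for rest.URLPathEscape, for illustration only.
    func escapePath(remote string) string {
    	segments := strings.Split(remote, "/")
    	for i, s := range segments {
    		segments[i] = url.PathEscape(s)
    	}
    	return strings.Join(segments, "/")
    }

    func main() {
    	base := "https://example.com/"
    	remote := "dir with space/file#1.txt"
    	fmt.Println(base + remote)             // raw concatenation
    	fmt.Println(base + escapePath(remote)) // escaped form
    }
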
@@ -708,66 +685,10 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return o.contentType
 }
 
-var commandHelp = []fs.CommandHelp{{
-	Name:  "set",
-	Short: "Set command for updating the config parameters.",
-	Long: `This set command can be used to update the config parameters
-for a running http backend.
-
-Usage Examples:
-
-    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: -o url=https://example.com
-
-The option keys are named as they are in the config file.
-
-This rebuilds the connection to the http backend when it is called with
-the new parameters. Only new parameters need be passed as the values
-will default to those currently in use.
-
-It doesn't return anything.
-`,
-}}
-
-// Command the backend to run a named command
-//
-// The command run is name
-// args may be used to read arguments from
-// opts may be used to read optional arguments from
-//
-// The result should be capable of being JSON encoded
-// If it is a string or a []string it will be shown to the user
-// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
-	switch name {
-	case "set":
-		newOpt := f.opt
-		err := configstruct.Set(configmap.Simple(opt), &newOpt)
-		if err != nil {
-			return nil, fmt.Errorf("reading config: %w", err)
-		}
-		_, err = f.httpConnection(ctx, &newOpt)
-		if err != nil {
-			return nil, fmt.Errorf("updating session: %w", err)
-		}
-		f.opt = newOpt
-		keys := []string{}
-		for k := range opt {
-			keys = append(keys, k)
-		}
-		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
-		return nil, nil
-	default:
-		return nil, fs.ErrorCommandNotFound
-	}
-}
-
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs          = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
-	_ fs.Commander   = &Fs{}
 )

@@ -1,66 +0,0 @@
-// Package client provides a client for interacting with the ImageKit API.
-package client
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/lib/rest"
-)
-
-// ImageKit main struct
-type ImageKit struct {
-	Prefix        string
-	UploadPrefix  string
-	Timeout       int64
-	UploadTimeout int64
-	PrivateKey    string
-	PublicKey     string
-	URLEndpoint   string
-	HTTPClient    *rest.Client
-}
-
-// NewParams is a struct to define parameters to imagekit
-type NewParams struct {
-	PrivateKey  string
-	PublicKey   string
-	URLEndpoint string
-}
-
-// New returns ImageKit object from environment variables
-func New(ctx context.Context, params NewParams) (*ImageKit, error) {
-
-	privateKey := params.PrivateKey
-	publicKey := params.PublicKey
-	endpointURL := params.URLEndpoint
-
-	switch {
-	case privateKey == "":
-		return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
-	case publicKey == "":
-		return nil, fmt.Errorf("ImageKit.io public key is required")
-	case endpointURL == "":
-		return nil, fmt.Errorf("ImageKit.io private key is required")
-	}
-
-	cliCtx, cliCfg := fs.AddConfig(ctx)
-
-	cliCfg.UserAgent = "rclone/imagekit"
-	client := rest.NewClient(fshttp.NewClient(cliCtx))
-
-	client.SetUserPass(privateKey, "")
-	client.SetHeader("Accept", "application/json")
-
-	return &ImageKit{
-		Prefix:        "https://api.imagekit.io/v2",
-		UploadPrefix:  "https://upload.imagekit.io/api/v2",
-		Timeout:       60,
-		UploadTimeout: 3600,
-		PrivateKey:    params.PrivateKey,
-		PublicKey:     params.PublicKey,
-		URLEndpoint:   params.URLEndpoint,
-		HTTPClient:    client,
-	}, nil
-}

@@ -1,252 +0,0 @@
-package client
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/rclone/rclone/lib/rest"
-	"gopkg.in/validator.v2"
-)
-
-// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
-type FilesOrFolderParam struct {
-	Path        string `json:"path,omitempty"`
-	Limit       int    `json:"limit,omitempty"`
-	Skip        int    `json:"skip,omitempty"`
-	SearchQuery string `json:"searchQuery,omitempty"`
-}
-
-// AITag represents an AI tag for a media library file.
-type AITag struct {
-	Name       string  `json:"name"`
-	Confidence float32 `json:"confidence"`
-	Source     string  `json:"source"`
-}
-
-// File represents media library File details.
-type File struct {
-	FileID            string            `json:"fileId"`
-	Name              string            `json:"name"`
-	FilePath          string            `json:"filePath"`
-	Type              string            `json:"type"`
-	VersionInfo       map[string]string `json:"versionInfo"`
-	IsPrivateFile     *bool             `json:"isPrivateFile"`
-	CustomCoordinates *string           `json:"customCoordinates"`
-	URL               string            `json:"url"`
-	Thumbnail         string            `json:"thumbnail"`
-	FileType          string            `json:"fileType"`
-	Mime              string            `json:"mime"`
-	Height            int               `json:"height"`
-	Width             int               `json:"Width"`
-	Size              uint64            `json:"size"`
-	HasAlpha          bool              `json:"hasAlpha"`
-	CustomMetadata    map[string]any    `json:"customMetadata,omitempty"`
-	EmbeddedMetadata  map[string]any    `json:"embeddedMetadata"`
-	CreatedAt         time.Time         `json:"createdAt"`
-	UpdatedAt         time.Time         `json:"updatedAt"`
-	Tags              []string          `json:"tags"`
-	AITags            []AITag           `json:"AITags"`
-}
-
-// Folder represents media library Folder details.
-type Folder struct {
-	*File
-	FolderPath string `json:"folderPath"`
-}
-
-// CreateFolderParam represents parameter to create folder api
-type CreateFolderParam struct {
-	FolderName       string `validate:"nonzero" json:"folderName"`
-	ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
-}
-
-// DeleteFolderParam represents parameter to delete folder api
-type DeleteFolderParam struct {
-	FolderPath string `validate:"nonzero" json:"folderPath"`
-}
-
-// MoveFolderParam represents parameter to move folder api
-type MoveFolderParam struct {
-	SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
-	DestinationPath  string `validate:"nonzero" json:"destinationPath"`
-}
-
-// JobIDResponse respresents response struct with JobID for folder operations
-type JobIDResponse struct {
-	JobID string `json:"jobId"`
-}
-
-// JobStatus represents response Data to job status api
-type JobStatus struct {
-	JobID  string `json:"jobId"`
-	Type   string `json:"type"`
-	Status string `json:"status"`
-}
-
-// File represents media library File details.
-func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
-	data := &File{}
-	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:       "GET",
-		Path:         fmt.Sprintf("/files/%s/details", fileID),
-		RootURL:      ik.Prefix,
-		IgnoreStatus: true,
-	}, nil, data)
-
-	return response, data, err
-}
-
-// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
-func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
-	var SearchQuery = `type = "file"`
-
-	if includeVersion {
-		SearchQuery = `type IN ["file", "file-version"]`
-	}
-	if params.SearchQuery != "" {
-		SearchQuery = params.SearchQuery
-	}
-
-	parameters := url.Values{}
-
-	parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
-	parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
-	parameters.Set("path", params.Path)
-	parameters.Set("searchQuery", SearchQuery)
-
-	data := &[]File{}
-
-	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:     "GET",
-		Path:       "/files",
-		RootURL:    ik.Prefix,
-		Parameters: parameters,
-	}, nil, data)
-
-	return response, data, err
-}
-
-// DeleteFile removes file by FileID from media library
-func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
-	var err error
-
-	if fileID == "" {
-		return nil, errors.New("fileID can not be empty")
-	}
-
-	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:     "DELETE",
-		Path:       fmt.Sprintf("/files/%s", fileID),
-		RootURL:    ik.Prefix,
-		NoResponse: true,
-	}, nil, nil)
-
-	return response, err
-}
-
-// Folders retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
-func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
-	var SearchQuery = `type = "folder"`
-
-	if params.SearchQuery != "" {
-		SearchQuery = params.SearchQuery
-	}
-
-	parameters := url.Values{}
-
-	parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
-	parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
-	parameters.Set("path", params.Path)
-	parameters.Set("searchQuery", SearchQuery)
-
-	data := &[]Folder{}
-
-	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:     "GET",
-		Path:       "/files",
-		RootURL:    ik.Prefix,
-		Parameters: parameters,
-	}, nil, data)
-
-	if err != nil {
-		return resp, data, err
-	}
-
-	return resp, data, err
-}
-
-// CreateFolder creates a new folder in media library
-func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
-	var err error
-
-	if err = validator.Validate(&param); err != nil {
-		return nil, err
-	}
-
-	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:     "POST",
-		Path:       "/folder",
-		RootURL:    ik.Prefix,
-		NoResponse: true,
-	}, param, nil)
-
-	return response, err
-}
-
-// DeleteFolder removes the folder from media library
-func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
-	var err error
-
-	if err = validator.Validate(&param); err != nil {
-		return nil, err
-	}
-
-	response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:     "DELETE",
-		Path:       "/folder",
-		RootURL:    ik.Prefix,
-		NoResponse: true,
-	}, param, nil)
-
-	return response, err
-}
-
-// MoveFolder moves given folder to new path in media library
-func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
-	var err error
-	var response = &JobIDResponse{}
-
-	if err = validator.Validate(&param); err != nil {
-		return nil, nil, err
-	}
-
-	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:  "PUT",
-		Path:    "bulkJobs/moveFolder",
-		RootURL: ik.Prefix,
-	}, param, response)
-
-	return resp, response, err
-}
-
-// BulkJobStatus retrieves the status of a bulk job by job ID.
-func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
-	var err error
-	var response = &JobStatus{}
-
-	if jobID == "" {
-		return nil, nil, errors.New("jobId can not be blank")
-	}
-
-	resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
-		Method:  "GET",
-		Path:    "bulkJobs/" + jobID,
-		RootURL: ik.Prefix,
-	}, nil, response)
-
-	return resp, response, err
-}

Some files were not shown because too many files have changed in this diff.