forked from TrueCloudLab/rclone

Compare commits: 1 commit (tcl/master...fix-7743-c)

SHA1: 8584bef006

879 changed files with 91108 additions and 148847 deletions
@@ -1,45 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: community, triage, bug
-assignees: ''
-
----
-
-<!--- Provide a general summary of the issue in the Title above -->
-
-## Expected Behavior
-<!--- If you're describing a bug, tell us what should happen -->
-<!--- If you're suggesting a change/improvement, tell us how it should work -->
-
-## Current Behavior
-<!--- If describing a bug, tell us what happens instead of the expected behavior -->
-<!--- If suggesting a change/improvement, explain the difference from current behavior -->
-
-## Possible Solution
-<!--- Not obligatory -->
-<!--- If no reason/fix/additions for the bug can be suggested, -->
-<!--- uncomment the following phrase: -->
-
-<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
-
-## Steps to Reproduce (for bugs)
-<!--- Provide a link to a live example, or an unambiguous set of steps to -->
-<!--- reproduce this bug. -->
-
-1.
-
-## Context
-<!--- How has this issue affected you? What are you trying to accomplish? -->
-<!--- Providing context helps us come up with a solution that is most useful in the real world -->
-
-## Regression
-<!-- Is this issue a regression? (Yes / No) -->
-<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
-
-## Your Environment
-<!--- Include as many relevant details about the environment you experienced the bug in -->
-* Version used:
-* Server setup and configuration:
-* Operating System and version (`uname -a`):

@@ -1 +0,0 @@
-blank_issues_enabled: false

@@ -1,24 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - tcl/master
-
-jobs:
-  builds:
-    name: Builds
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        go_versions: [ '1.22', '1.23' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Build binary
-        run: make

@@ -1,20 +0,0 @@
-on: [pull_request]
-
-jobs:
-  dco:
-    name: DCO
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Setup Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-
-      - name: Run commit format checker
-        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
-        with:
-          from: 'origin/${{ github.event.pull_request.base.ref }}'

@@ -1,67 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - tcl/master
-
-jobs:
-  lint:
-    name: Lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-          cache: true
-
-      - name: Install linters
-        run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
-      - name: Run linters
-        run: make check
-  test:
-    name: Test
-    runs-on: oci-runner
-    strategy:
-      matrix:
-        go_versions: [ '1.23' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Tests for the FrostFS backend
-        env:
-          RESTIC_TEST_FUSE: false
-          AIO_IMAGE: truecloudlab/frostfs-aio
-          AIO_VERSION: 1.7.0-nightly.4
-          RCLONE_CONFIG: /config/rclone.conf
-
-        # run only tests related to FrostFS backend
-        run: |-
-          podman-service.sh
-          podman info
-
-          mkdir /config
-          printf "[TestFrostFS]\ntype = frostfs\nendpoint = localhost:8080\nwallet = /config/wallet.json\nplacement_policy = REP 1\nrequest_timeout = 20s\nconnection_timeout = 21s" > /config/rclone.conf
-
-          echo "Run frostfs aio container"
-          docker run -d --net=host --name aio $AIO_IMAGE:$AIO_VERSION --restart always -p 8080:8080
-
-          echo "Wait for frostfs to start"
-          until docker exec aio curl --fail http://localhost:8083 > /dev/null 2>&1; do sleep 0.2; done;
-
-          echo "Issue creds"
-          docker exec aio /usr/bin/issue-creds.sh native
-          echo "Copy wallet"
-          docker cp aio:/config/user-wallet.json /config/wallet.json
-
-          echo "Start tests"
-          go test -v github.com/rclone/rclone/backend/frostfs
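For reference, the printf in the deleted test step above expands its \n escapes into the following rclone config file (content taken directly from the command, nothing added):

    [TestFrostFS]
    type = frostfs
    endpoint = localhost:8080
    wallet = /config/wallet.json
    placement_policy = REP 1
    request_timeout = 20s
    connection_timeout = 21s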

.gitattributes (vendored): 4 changes
@@ -1,7 +1,3 @@
 # Go writes go.mod and go.sum with lf even on windows
 go.mod text eol=lf
 go.sum text eol=lf
-
-# Ignore generated files in GitHub language statistics and diffs
-/MANUAL.* linguist-generated=true
-/rclone.1 linguist-generated=true

.github/workflows/build.yml (vendored): 110 changes
@@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true

@@ -43,14 +43,14 @@ jobs:
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true

@@ -59,14 +59,14 @@ jobs:
           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'

@@ -76,20 +76,20 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.22.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.21
+          - job_name: go1.20
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.20'
             quicktest: true
             racequicktest: true

-          - job_name: go1.22
+          - job_name: go1.21
             os: ubuntu-latest
-            go: '1.22'
+            go: '1.21'
             quicktest: true
             racequicktest: true

@@ -124,7 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS

@@ -137,7 +137,7 @@ jobs:
           brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
-          brew install git-annex git-annex-remote-rclone
+          brew install git-annex
         if: matrix.os == 'macos-latest'

       - name: Install Libraries on Windows

@@ -168,6 +168,14 @@ jobs:
           printf "\n\nSystem environment:\n\n"
           env

+      - name: Go module cache
+        uses: actions/cache@v4
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
       - name: Build rclone
         shell: bash
         run: |

@@ -223,71 +231,21 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - name: Get runner parameters
-        id: get-runner-parameters
-        shell: bash
-        run: |
-          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
-          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
-
       - name: Checkout
         uses: actions/checkout@v4

+      - name: Code quality test
+        uses: golangci/golangci-lint-action@v4
+        with:
+          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+          version: latest
+
       # Run govulncheck on the latest go version, the one we build binaries with
       - name: Install Go
-        id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '>=1.22.0-rc.1'
           check-latest: true
           cache: false

-      - name: Cache
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            ~/.cache/golangci-lint
-          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
-          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
-
-      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "windows"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "darwin"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "freebsd"
-        with:
-          version: latest
-          skip-cache: true
-
-      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v6
-        env:
-          GOOS: "openbsd"
-        with:
-          version: latest
-          skip-cache: true
-
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

@@ -311,7 +269,15 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '>=1.22.0-rc.1'

+      - name: Go module cache
+        uses: actions/cache@v4
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
       - name: Set global environment variables
         shell: bash

@@ -56,7 +56,7 @@ jobs:
       run: |
         df -h .
     - name: Build and publish image
-      uses: docker/build-push-action@v6
+      uses: docker/build-push-action@v5
       with:
         file: Dockerfile
        context: .

@@ -32,27 +32,15 @@ jobs:
     - name: Get actual major version
       id: actual_major_version
       run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-    - name: Set up QEMU
-      uses: docker/setup-qemu-action@v3
-    - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v3
-    - name: Login to Docker Hub
-      uses: docker/login-action@v3
-      with:
-        username: ${{ secrets.DOCKER_HUB_USER }}
-        password: ${{ secrets.DOCKER_HUB_PASSWORD }}
     - name: Build and publish image
-      uses: docker/build-push-action@v6
+      uses: ilteoood/docker_buildx@1.1.0
       with:
         file: Dockerfile
         context: .
-        platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-        push: true
-        tags: |
-          rclone/rclone:latest
-          rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
-          rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
-          rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+        tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+        imageName: rclone/rclone
+        platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+        publish: true
+        dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+        dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'

.github/workflows/notify.yml (vendored): 15 changes
@@ -1,15 +0,0 @@
-name: Notify users based on issue labels
-
-on:
-  issues:
-    types: [labeled]
-
-jobs:
-  notify:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: jenschelkopf/issue-label-notification-action@1.3
-        with:
-          token: ${{ secrets.NOTIFY_ACTION_TOKEN }}
-          recipients: |
-            Support Contract=@rclone/support

.github/workflows/winget.yml (vendored): 28 changes
@@ -1,14 +1,14 @@
-name: Publish to Winget
-on:
-  release:
-    types: [released]
-
-jobs:
-  publish:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@v2
-        with:
-          identifier: Rclone.Rclone
-          installers-regex: '-windows-\w+\.zip$'
-          token: ${{ secrets.WINGET_TOKEN }}
+name: Publish to Winget
+on:
+  release:
+    types: [released]
+
+jobs:
+  publish:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: vedantmgoyal2009/winget-releaser@v2
+        with:
+          identifier: Rclone.Rclone
+          installers-regex: '-windows-\w+\.zip$'
+          token: ${{ secrets.WINGET_TOKEN }}

.gitignore (vendored): 5 changes
@@ -3,9 +3,7 @@ _junk/
 rclone
 rclone.exe
 build
-/docs/public/
-/docs/.hugo_build.lock
-/docs/static/img/logos/
+docs/public
 rclone.iml
 .idea
 .history

@@ -18,5 +16,6 @@ fuzz-build.zip
 Thumbs.db
 __pycache__
 .DS_Store
+/docs/static/img/logos/
 resource_windows_*.syso
 .devcontainer
@@ -13,7 +13,6 @@ linters:
     - stylecheck
     - unused
     - misspell
-    - gocritic
     #- prealloc
     #- maligned
   disable-all: true

@@ -99,46 +98,3 @@ linters-settings:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-  gocritic:
-    # Enable all default checks with some exceptions and some additions (commented).
-    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-    disable-all: true
-    enabled-checks:
-      #- appendAssign # Enabled by default
-      - argOrder
-      - assignOp
-      - badCall
-      - badCond
-      #- captLocal # Enabled by default
-      - caseOrder
-      - codegenComment
-      #- commentFormatting # Enabled by default
-      - defaultCaseOrder
-      - deprecatedComment
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      #- exitAfterDefer # Enabled by default
-      - flagDeref
-      - flagName
-      #- ifElseChain # Enabled by default
-      - mapKey
-      - newDeref
-      - offBy1
-      - regexpMust
-      - ruleguard # Not enabled by default
-      #- singleCaseSwitch # Enabled by default
-      - sloppyLen
-      - sloppyTypeAssert
-      - switchTrue
-      - typeSwitchVar
-      - underef
-      - unlambda
-      - unslice
-      - valSwap
-      - wrapperFunc
-    settings:
-      ruleguard:
-        rules: "${configDir}/bin/rules.go"
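One check in the gocritic list above, dupArg, shows up again later in this diff (the b2 timestamp tests carry //nolint:gocritic annotations for it). A minimal runnable sketch of what the check flags (the type here is hypothetical, not from rclone):

    package main

    import "fmt"

    type point struct{ x, y int }

    func (p point) Equal(q point) bool { return p == q }

    func main() {
    	p := point{1, 2}
    	// gocritic's dupArg reports "suspicious method call with the same
    	// argument and receiver" for calls like p.Equal(p). Tests that
    	// deliberately compare a value with itself silence it with
    	// //nolint:gocritic, as seen in the b2 changes below.
    	fmt.Println(p.Equal(p)) // true
    }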

@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
 project root:

     go install github.com/rclone/rclone/fstest/test_all
-    test_all -backends drive
+    test_all -backend drive

 ### Full integration testing

@@ -508,7 +508,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint).
+  - Exclude your provider from genric config questions (eg `region` and `endpoint).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.

@@ -21,8 +21,6 @@ Current active maintainers of rclone are:
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
 | Hideo Aoyama | @boukendesho | snap packaging |
-| nielash | @nielash | bisync |
-| Dan McArdle | @dmcardle | gitannex |
-| Sam Harrison | @childish-sambino | filescom |

 **This is a work in progress Draft**


MANUAL.html (generated): 60356 changes
File diff suppressed because it is too large

MANUAL.txt (generated): 28546 changes
File diff suppressed because it is too large

Makefile: 31 changes
@@ -36,14 +36,13 @@ ifdef BETA_SUBDIR
 endif
 BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
-BETA_UPLOAD_ROOT := beta.rclone.org:
+BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
-LDFLAGS=--ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)"

 .PHONY: rclone test_all vars version

@@ -51,7 +50,7 @@ rclone:
 ifeq ($(GO_OS),windows)
	go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
 endif
-	go build -v $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS)
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 ifeq ($(GO_OS),windows)
	rm resource_windows_`go env GOARCH`.syso
 endif

@@ -60,7 +59,7 @@ endif
	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install $(LDFLAGS) $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
	@echo SHELL="'$(SHELL)'"

@@ -88,13 +87,13 @@ test: rclone test_all

 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...

 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

 compiletest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

 # Do source code quality checks
 check: rclone

@@ -168,7 +167,7 @@ website:
	@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi

 upload_website: website
-	rclone -v sync docs/public www.rclone.org:
+	rclone -v sync docs/public memstore:www-rclone-org

 upload_test_website: website
	rclone -P sync docs/public test-rclone-org:

@@ -195,8 +194,8 @@ check_sign:
	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

 upload:
-	rclone -P copy build/ downloads.rclone.org:/$(TAG)
-	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "downloads.rclone.org:/$(TAG)/$$i" "downloads.rclone.org:/$$j"'
+	rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
+	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'

 upload_github:
	./bin/upload-github $(TAG)

@@ -206,7 +205,7 @@ cross: doc

 beta:
	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
-	rclone -v copy build/ pub.rclone.org:/$(TAG)
+	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

 log_since_last_release:

@@ -219,18 +218,18 @@ ci_upload:
	sudo chown -R $$USER build
	find build -type l -delete
	gzip -r9v build
-	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
 ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
-	./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
+	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
	@echo Beta release ready at $(BETA_URL)/testbuilds

 ci_beta:
	git log $(LAST_TAG).. > /tmp/git-log.txt
	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
-	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
-	rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
	@echo Beta release ready at $(BETA_URL)

@@ -239,7 +238,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

 serve: website
-	cd docs && hugo server --logLevel info -w --disableFastRender
+	cd docs && hugo server -v -w --disableFastRender

 tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
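The bash regex in the upload target above maps each versioned artifact onto a "-current-" alias before copying. A small Go sketch of the same transformation (the file name is illustrative, not taken from the diff):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // Same pattern as the Makefile's [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] test.
    var versioned = regexp.MustCompile(`(.*)(-v[0-9.]+-)(.*)`)

    func main() {
    	name := "rclone-v1.67.0-linux-amd64.zip" // hypothetical artifact name
    	if m := versioned.FindStringSubmatch(name); m != nil {
    		// The Makefile copies the versioned file to this -current- name.
    		fmt.Println(m[1] + "-current-" + m[3]) // rclone-current-linux-amd64.zip
    	}
    }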

@@ -55,14 +55,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
-* Files.com [:page_facing_up:](https://rclone.org/filescom/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
-* GoFile [:page_facing_up:](https://rclone.org/gofile/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
-* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)

@@ -76,7 +73,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Linkbox [:page_facing_up:](https://rclone.org/linkbox)
 * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
-* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)

@@ -96,7 +92,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
 * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
-* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)

@@ -105,7 +100,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
-* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)

RELEASE.md: 51 changes
@@ -37,44 +37,18 @@ This file describes how to make the various kinds of releases

 ## Update dependencies

-Early in the next release cycle update the dependencies.
+Early in the next release cycle update the dependencies

-* Review any pinned packages in go.mod and remove if possible
-* `make updatedirect`
-* `make GOTAGS=cmount`
-* `make compiletest`
-* Fix anything which doesn't compile at this point and commit changes here
-* `git commit -a -v -m "build: update all dependencies"`
-
-If the `make updatedirect` upgrades the version of go in the `go.mod`
-then go to manual mode. `go1.20` here is the lowest supported version
-in the `go.mod`.
-
-```
-go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
-go get -d $(cat /tmp/potential-upgrades)
-go mod tidy -go=1.20 -compat=1.20
-```
-
-If the `go mod tidy` fails use the output from it to remove the
-package which can't be upgraded from `/tmp/potential-upgrades` when
-done
-
-```
-git co go.mod go.sum
-```
-
-And try again.
-
-Optionally upgrade the direct and indirect dependencies. This is very
-likely to fail if the manual method was used above - in that case
-ignore it as it is too time consuming to fix.
-
-* `make update`
-* `make GOTAGS=cmount`
-* `make compiletest`
+* make updatedirect
+* make GOTAGS=cmount
+* make compiletest
+* git commit -a -v
+* make update
+* make GOTAGS=cmount
+* make compiletest
 * roll back any updates which didn't compile
-* `git commit -a -v --amend`
+* git commit -a -v --amend
 * **NB** watch out for this changing the default go version in `go.mod`

 Note that `make update` updates all direct and indirect dependencies

@@ -83,9 +57,6 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.

-Once it compiles locally, push it on a test branch and commit fixes
-until the tests pass.
-
 ## Tidy beta

 At some point after the release run

@@ -168,8 +139,6 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
 To make a full build then set the tags correctly and add `--push`

-Note that you can't only build one architecture - you need to build them all.
-
 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```

VERSION: 2 changes
@@ -1 +1 @@
-v1.68.2
+v1.67.0

@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
	configfile.Install()

	// Configure the remote
-	config.FileSetValue(remoteName, "type", "alias")
-	config.FileSetValue(remoteName, "remote", root)
+	config.FileSet(remoteName, "type", "alias")
+	config.FileSet(remoteName, "remote", root)
 }

 func TestNewFS(t *testing.T) {

@@ -17,10 +17,7 @@ import (
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
-	_ "github.com/rclone/rclone/backend/filescom"
+	_ "github.com/rclone/rclone/backend/frostfs"
	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/gofile"
	_ "github.com/rclone/rclone/backend/googlecloudstorage"
	_ "github.com/rclone/rclone/backend/googlephotos"
	_ "github.com/rclone/rclone/backend/hasher"

@@ -42,7 +39,6 @@ import (
	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
	_ "github.com/rclone/rclone/backend/pcloud"
	_ "github.com/rclone/rclone/backend/pikpak"
-	_ "github.com/rclone/rclone/backend/pixeldrain"
	_ "github.com/rclone/rclone/backend/premiumizeme"
	_ "github.com/rclone/rclone/backend/protondrive"
	_ "github.com/rclone/rclone/backend/putio"

@@ -1,4 +1,5 @@
 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

@@ -711,11 +712,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		ClientOptions: policyClientOptions,
	}

-	// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
+	// Here we auth by setting one of cred, sharedKeyCred or f.svc
	var (
		cred          azcore.TokenCredential
		sharedKeyCred *service.SharedKeyCredential
-		anonymous     = false
	)
	switch {
	case opt.EnvAuth:

@@ -875,9 +875,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}
-	case opt.Account != "":
-		// Anonymous access
-		anonymous = true
	default:
		return nil, errors.New("no authentication method configured")
	}

@@ -907,12 +904,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		if err != nil {
			return nil, fmt.Errorf("create client failed: %w", err)
		}
-	} else if anonymous {
-		// Anonymous public access
-		f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
-		if err != nil {
-			return nil, fmt.Errorf("create public client failed: %w", err)
-		}
	}
	if f.svc == nil {

@@ -1098,7 +1089,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
	isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
	if isDirectory {
		// Don't insert the root directory
-		if remote == f.opt.Enc.ToStandardPath(directory) {
+		if remote == directory {
			continue
		}
		// process directory markers as directories

@@ -2094,6 +2085,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
		return 0, nil
	}
	md5sum := m.Sum(nil)
+	transactionalMD5 := md5sum[:]

	// increment the blockID and save the blocks for finalize
	var binaryBlockID [8]byte // block counter as LSB first 8 bytes

@@ -2116,7 +2108,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
	}
	options := blockblob.StageBlockOptions{
		// Specify the transactional md5 for the body, to be validated by the service.
-		TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
+		TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
	}
	_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
	if err != nil {

@@ -1,4 +1,5 @@
 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob

@@ -1,6 +1,7 @@
 // Test AzureBlob filesystem interface

 //go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob
|
|||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || solaris || js
|
||||
// +build plan9 solaris js
|
||||
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
package azureblob
|
||||
|
|
|

@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 // Package azurefiles provides an interface to Microsoft Azure Files
 package azurefiles

@@ -1035,10 +1036,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
			return fmt.Errorf("update: unable to create file: %w", createErr)
		}
-	} else if size != o.Size() {
+	} else {
		// Resize the file if needed
-		if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
-			return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
+		if size != o.Size() {
+			if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
+				return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
+			}
		}
	}
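Both sides of the Update hunk above implement the same decision: create the file when it does not exist, resize only when the incoming size differs. A compact runnable sketch of that decision (hypothetical types, not the backend's real ones):

    package main

    import "fmt"

    type file struct {
    	exists bool
    	size   int64
    }

    // prepare mirrors the update logic: create on first write, resize
    // only when the upload size differs from the stored size.
    func prepare(f *file, size int64) string {
    	if !f.exists {
    		f.exists, f.size = true, size
    		return "created"
    	} else if size != f.size {
    		f.size = size
    		return "resized"
    	}
    	return "unchanged"
    }

    func main() {
    	f := &file{}
    	fmt.Println(prepare(f, 10)) // created
    	fmt.Println(prepare(f, 10)) // unchanged
    	fmt.Println(prepare(f, 20)) // resized
    }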

@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package azurefiles

@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package azurefiles

@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

 //go:build plan9 || js
+// +build plan9 js

-// Package azurefiles provides an interface to Microsoft Azure Files
 package azurefiles

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }

 func TestTimestampEqual(t *testing.T) {
-	assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.False(t, emptyT.Equal(emptyT))
	assert.False(t, t0.Equal(emptyT))
	assert.False(t, emptyT.Equal(t0))
	assert.False(t, t0.Equal(t1))
	assert.False(t, t1.Equal(t0))
-	assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
-	assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.True(t, t0.Equal(t0))
+	assert.True(t, t1.Equal(t1))
 }

@@ -102,7 +102,7 @@ below will cause b2 to return specific errors:
 * "force_cap_exceeded"

 These will be set in the "X-Bz-Test-Mode" header which is documented
-in the [b2 integrations checklist](https://www.backblaze.com/docs/cloud-storage-integration-checklist).`,
+in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
			Default:  "",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,

@@ -244,7 +244,7 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
-			// See: https://www.backblaze.com/docs/cloud-storage-files
+			// See: https://www.backblaze.com/b2/docs/files.html
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			// FIXME: allow /, but not leading, trailing or double
			Default: (encoder.Display |

@@ -299,14 +299,13 @@ type Fs struct {

 // Object describes a b2 object
 type Object struct {
-	fs       *Fs               // what this object is part of
-	remote   string            // The remote path
-	id       string            // b2 id of the file
-	modTime  time.Time         // The modified time of the object if known
-	sha1     string            // SHA-1 hash if known
-	size     int64             // Size of the object
-	mimeType string            // Content-Type of the object
-	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
+	fs       *Fs       // what this object is part of
+	remote   string    // The remote path
+	id       string    // b2 id of the file
+	modTime  time.Time // The modified time of the object if known
+	sha1     string    // SHA-1 hash if known
+	size     int64     // Size of the object
+	mimeType string    // Content-Type of the object
 }

 // ------------------------------------------------------------

@@ -364,7 +363,7 @@ var retryErrorCodes = []int{
	504, // Gateway Time-out
 }

-// shouldRetryNoReauth returns a boolean as to whether this resp and err
+// shouldRetryNoAuth returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {

@@ -1567,7 +1566,7 @@ func (o *Object) Size() int64 {
 //
 // Make sure it is lower case.
 //
-// Remove unverified prefix - see https://www.backblaze.com/docs/cloud-storage-upload-files-with-the-native-api
+// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this
 func cleanSHA1(sha1 string) string {
	const unverified = "unverified:"

@@ -1594,14 +1593,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
	o.size = Size
	// Use the UploadTimestamp if can't get file info
	o.modTime = time.Time(UploadTimestamp)
-	err = o.parseTimeString(Info[timeKey])
-	if err != nil {
-		return err
-	}
-	// For now, just set "mtime" in metadata
-	o.meta = make(map[string]string, 1)
-	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
-	return nil
+	return o.parseTimeString(Info[timeKey])
 }

 // decodeMetaData sets the metadata in the object from an api.File

@@ -1703,16 +1695,6 @@ func timeString(modTime time.Time) string {
	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
 }

-// parseTimeStringHelper converts a decimal string number of milliseconds
-// elapsed since January 1, 1970 UTC into a time.Time
-func parseTimeStringHelper(timeString string) (time.Time, error) {
-	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
-	if err != nil {
-		return time.Time{}, err
-	}
-	return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
-}
-
 // parseTimeString converts a decimal string number of milliseconds
 // elapsed since January 1, 1970 UTC into a time.Time and stores it in
 // the modTime variable.
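The parseTimeStringHelper removed above converts B2's decimal-millisecond timestamps into time.Time values. A self-contained usage sketch of the same conversion (the example value is illustrative):

    package main

    import (
    	"fmt"
    	"strconv"
    	"time"
    )

    // parseMillis mirrors parseTimeStringHelper: a decimal string of
    // milliseconds since the Unix epoch becomes a UTC time.Time.
    func parseMillis(s string) (time.Time, error) {
    	ms, err := strconv.ParseInt(s, 10, 64)
    	if err != nil {
    		return time.Time{}, err
    	}
    	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
    }

    func main() {
    	t, err := parseMillis("1370056350123")
    	fmt.Println(t, err) // 2013-06-01 03:12:30.123 +0000 UTC <nil>
    }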
@@ -1720,12 +1702,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
	if timeString == "" {
		return nil
	}
-	modTime, err := parseTimeStringHelper(timeString)
+	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
	if err != nil {
		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
		return nil
	}
-	o.modTime = modTime
+	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
	return nil
 }

@@ -1879,14 +1861,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
		ContentType: resp.Header.Get("Content-Type"),
		Info:        Info,
	}

-	// Embryonic metadata support - just mtime
-	o.meta = make(map[string]string, 1)
-	modTime, err := parseTimeStringHelper(info.Info[timeKey])
-	if err == nil {
-		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
-	}
-
	// When reading files from B2 via cloudflare using
	// --b2-download-url cloudflare strips the Content-Length
	// headers (presumably so it can inject stuff) so use the old

@@ -1984,7 +1958,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	if err == nil {
		fs.Debugf(o, "File is big enough for chunked streaming")
-		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
		if err != nil {
			o.fs.putRW(rw)
			return err
@@ -2016,10 +1990,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return o.decodeMetaDataFileInfo(up.info)
	}

-	modTime, err := o.getModTime(ctx, src, options)
-	if err != nil {
-		return err
-	}
+	modTime := src.ModTime(ctx)

	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
	if calculatedSha1 == "" {

@@ -2124,36 +2095,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return o.decodeMetaDataFileInfo(&response)
 }

-// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
-// When metadata support is added to b2, this method will need a more generic name
-func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
-	modTime := src.ModTime(ctx)
-
-	// Fetch metadata if --metadata is in use
-	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
-	if err != nil {
-		return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
-	}
-	// merge metadata into request and user metadata
-	for k, v := range meta {
-		k = strings.ToLower(k)
-		// For now, the only metadata we're concerned with is "mtime"
-		switch k {
-		case "mtime":
-			// mtime in meta overrides source ModTime
-			metaModTime, err := time.Parse(time.RFC3339Nano, v)
-			if err != nil {
-				fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
-			} else {
-				modTime = metaModTime
-			}
-		default:
-			// Do nothing for now
-		}
-	}
-	return modTime, nil
-}
-
 // OpenChunkWriter returns the chunk size and a ChunkWriter
 //
 // Pass in the remote and the src object
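The getModTime helper removed above gives a parseable "mtime" metadata value precedence over the source object's own modification time. A minimal sketch of that precedence rule (simplified signatures, not the rclone API):

    package main

    import (
    	"fmt"
    	"time"
    )

    // effectiveModTime prefers a parseable "mtime" metadata entry and
    // falls back to the source object's modification time otherwise.
    func effectiveModTime(srcModTime time.Time, meta map[string]string) time.Time {
    	if v, ok := meta["mtime"]; ok {
    		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
    			return t
    		}
    	}
    	return srcModTime
    }

    func main() {
    	src := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC)
    	meta := map[string]string{"mtime": "2009-05-06T04:05:06.499Z"}
    	fmt.Println(effectiveModTime(src, meta)) // 2009-05-06 04:05:06.499 +0000 UTC
    }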
@@ -2185,7 +2126,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
		Concurrency: o.fs.opt.UploadConcurrency,
		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
	return info, up, err
 }

@@ -23,7 +23,7 @@ import (
 )

 // Test b2 string encoding
-// https://www.backblaze.com/docs/cloud-storage-native-api-string-encoding
+// https://www.backblaze.com/b2/docs/string_encoding.html

 var encodeTest = []struct {
	fullyEncoded string

@@ -184,126 +184,57 @@ func TestParseTimeString(t *testing.T) {

 }

-// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
-func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
-	var headers = make(map[string]string)
-	for _, option := range options {
-		k, v := option.Header()
-		k = strings.ToLower(k)
-		if strings.HasPrefix(k, headerPrefix) {
-			headers[k[len(headerPrefix):]] = v
-		}
-	}
-	return headers
-}
-
-func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
-	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
-	t.Run(what, func(t *testing.T) {
-		ctx := context.Background()
-
-		ss := fs.SizeSuffix(0)
-		err := ss.Set(size)
-		require.NoError(t, err)
-		original := random.String(int(ss))
-
-		contents := fstest.Gz(t, original)
-		mimeType := "text/html"
-
-		if chunkSize != "" {
-			ss := fs.SizeSuffix(0)
-			err := ss.Set(chunkSize)
-			require.NoError(t, err)
-			_, err = f.SetUploadChunkSize(ss)
-			require.NoError(t, err)
-		}
-
-		if uploadCutoff != "" {
-			ss := fs.SizeSuffix(0)
-			err := ss.Set(uploadCutoff)
-			require.NoError(t, err)
-			_, err = f.SetUploadCutoff(ss)
-			require.NoError(t, err)
-		}
-
-		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-		btime := time.Now()
-		metadata := fs.Metadata{
-			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
-
-			"mtime": "2009-05-06T04:05:06.499Z",
-		}
-
-		// Need to specify HTTP options with the header prefix since they are passed as-is
-		options := []fs.OpenOption{
-			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
-			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
-		}
-
-		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
-		defer func() {
-			assert.NoError(t, obj.Remove(ctx))
-		}()
-		o := obj.(*Object)
-		gotMetadata, err := o.getMetaData(ctx)
-		require.NoError(t, err)
-
-		// X-Bz-Info-a & X-Bz-Info-b
-		optMetadata := OpenOptionToMetaData(options)
-		for k, v := range optMetadata {
-			got := gotMetadata.Info[k]
-			assert.Equal(t, v, got, k)
-		}
-
-		// mtime
-		for k, v := range metadata {
-			got := o.meta[k]
-			assert.Equal(t, v, got, k)
-		}
-
-		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
-
-		// Modification time from the x-bz-info-src_last_modified_millis header
-		var mtime api.Timestamp
-		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
-		if err != nil {
-			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
-		}
-		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
-
-		// Upload time
-		gotBtime := time.Time(gotMetadata.UploadTimestamp)
-		dt := gotBtime.Sub(btime)
-		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
-
-		t.Run("GzipEncoding", func(t *testing.T) {
-			// Test that the gzipped file we uploaded can be
-			// downloaded
-			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
-				gotContents := fstests.ReadObject(ctx, t, o, -1)
-				assert.Equal(t, wantContents, gotContents)
-				assert.Equal(t, wantSize, o.Size())
-				gotHash, err := o.Hash(ctx, hash.SHA1)
-				require.NoError(t, err)
-				assert.Equal(t, wantHash, gotHash)
-			}
-
-			t.Run("NoDecompress", func(t *testing.T) {
-				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
-			})
-		})
-	})
-}
-
-func (f *Fs) InternalTestMetadata(t *testing.T) {
-	// 1 kB regular file
-	f.internalTestMetadata(t, "1kiB", "", "")
-
-	// 10 MiB large file
-	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
-}
+// This is adapted from the s3 equivalent.
+func (f *Fs) InternalTestMetadata(t *testing.T) {
+	ctx := context.Background()
+	original := random.String(1000)
+	contents := fstest.Gz(t, original)
+	mimeType := "text/html"
+
+	item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+	btime := time.Now()
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
+	defer func() {
+		assert.NoError(t, obj.Remove(ctx))
+	}()
+	o := obj.(*Object)
+	gotMetadata, err := o.getMetaData(ctx)
+	require.NoError(t, err)
+
+	// We currently have a limited amount of metadata to test with B2
+	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
+
+	// Modification time from the x-bz-info-src_last_modified_millis header
+	var mtime api.Timestamp
+	err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
+	if err != nil {
+		fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
+	}
+	assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
+
+	// Upload time
+	gotBtime := time.Time(gotMetadata.UploadTimestamp)
+	dt := gotBtime.Sub(btime)
+	assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
+
+	t.Run("GzipEncoding", func(t *testing.T) {
+		// Test that the gzipped file we uploaded can be
+		// downloaded
+		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
+			gotContents := fstests.ReadObject(ctx, t, o, -1)
+			assert.Equal(t, wantContents, gotContents)
+			assert.Equal(t, wantSize, o.Size())
+			gotHash, err := o.Hash(ctx, hash.SHA1)
+			require.NoError(t, err)
+			assert.Equal(t, wantHash, gotHash)
+		}
+
+		t.Run("NoDecompress", func(t *testing.T) {
+			checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
+		})
+	})
+}

 func sha1Sum(t *testing.T, s string) string {
	hash := sha1.Sum([]byte(s))
	return fmt.Sprintf("%x", hash)
@ -1,6 +1,6 @@
|
|||
// Upload large files for b2
|
||||
//
|
||||
// Docs - https://www.backblaze.com/docs/cloud-storage-large-files
|
||||
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||
|
||||
package b2
|
||||
|
||||
|
@ -91,7 +91,7 @@ type largeUpload struct {
|
|||
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||
//
|
||||
// If newInfo is set then metadata from that will be used instead of reading it from src
|
||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
|
||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
|
||||
size := src.Size()
|
||||
parts := 0
|
||||
chunkSize := defaultChunkSize
|
||||
|
@ -104,6 +104,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||
parts++
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
}
|
||||
bucket, bucketPath := o.split()
|
||||
bucketID, err := f.getBucketID(ctx, bucket)
|
||||
if err != nil {
|
||||
|
@ -113,27 +118,12 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||
BucketID: bucketID,
|
||||
Name: f.opt.Enc.FromStandardPath(bucketPath),
|
||||
}
|
||||
optionsToSend := make([]fs.OpenOption, 0, len(options))
|
||||
if newInfo == nil {
|
||||
modTime, err := o.getModTime(ctx, src, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
modTime := src.ModTime(ctx)
|
||||
request.ContentType = fs.MimeType(ctx, src)
|
||||
request.Info = map[string]string{
|
||||
timeKey: timeString(modTime),
|
||||
}
|
||||
// Custom upload headers - remove header prefix since they are sent in the body
|
||||
for _, option := range options {
|
||||
k, v := option.Header()
|
||||
k = strings.ToLower(k)
|
||||
if strings.HasPrefix(k, headerPrefix) {
|
||||
request.Info[k[len(headerPrefix):]] = v
|
||||
} else {
|
||||
optionsToSend = append(optionsToSend, option)
|
||||
}
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if !o.fs.opt.DisableCheckSum || doCopy {
|
||||
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
|
@ -144,11 +134,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||
request.ContentType = newInfo.ContentType
|
||||
request.Info = newInfo.Info
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
Options: optionsToSend,
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||
|
|

backend/cache/cache.go (vendored): 23 changes
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 // Package cache implements a virtual provider to cache existing remotes.
 package cache

@@ -409,16 +410,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
		if err != nil {
			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
		}
-	} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
-		decPass, err := obscure.Reveal(opt.PlexPassword)
-		if err != nil {
-			decPass = opt.PlexPassword
-		}
-		f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-			m.Set("plex_token", token)
-		})
-		if err != nil {
-			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+	} else {
+		if opt.PlexPassword != "" && opt.PlexUsername != "" {
+			decPass, err := obscure.Reveal(opt.PlexPassword)
+			if err != nil {
+				decPass = opt.PlexPassword
+			}
+			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+				m.Set("plex_token", token)
+			})
+			if err != nil {
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+			}
		}
	}
88 backend/cache/cache_internal_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -10,6 +11,7 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
+	"log"
 	"math/rand"
 	"os"
 	"path"

@@ -32,7 +34,7 @@ import (
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfscommon"
+	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/require"
 )

@@ -92,7 +94,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int

-	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)

@@ -122,10 +124,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = time.Second * 30
+	vfsflags.Opt.DirCacheTime = time.Second * 30
 	testSize := int64(524288000)

-	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
+	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -337,7 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+	vfsflags.Opt.DirCacheTime = time.Second
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -367,7 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
+	vfsflags.Opt.DirCacheTime = time.Second
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -407,7 +409,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	// update in the wrapped fs
 	originalSize, err := runInstance.size(t, rootFs, "data.bin")
 	require.NoError(t, err)
-	fs.Logf(nil, "original size: %v", originalSize)
+	log.Printf("original size: %v", originalSize)

 	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)

@@ -416,7 +418,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	if runInstance.rootIsCrypt {
 		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 		require.NoError(t, err)
-		expectedSize++ // FIXME newline gets in, likely test data issue
+		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
 	} else {
 		data2 = []byte("test content")
 	}

@@ -424,7 +426,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
-	fs.Logf(nil, "updated size: %v", len(data2))
+	log.Printf("updated size: %v", len(data2))

 	// get a new instance from the cache
 	if runInstance.wrappedIsExternal {

@@ -484,49 +486,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		li, err := runInstance.list(t, rootFs, "test")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 2 {
-			fs.Logf(nil, "not expected listing /test: %v", li)
+			log.Printf("not expected listing /test: %v", li)
 			return fmt.Errorf("not expected listing /test: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 0 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/second")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/second: %v", li)
+			log.Printf("not expected listing /test/second: %v", li)
 			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}

-		fs.Logf(nil, "complete listing: %v", li)
+		log.Printf("complete listing: %v", li)
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)

@@ -576,43 +578,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
-			fs.Logf(nil, "not found /test")
+			log.Printf("not found /test")
 			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
-			fs.Logf(nil, "not found /test/one")
+			log.Printf("not found /test/one")
 			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
-			fs.Logf(nil, "not found /test/one/test2")
+			log.Printf("not found /test/one/test2")
 			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
-		fs.Logf(nil, "complete listing /test/one/test2")
+		log.Printf("complete listing /test/one/test2")
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)

@@ -707,7 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
+	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)

@@ -742,7 +744,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)
+	vfsflags.Opt.DirCacheTime = time.Second * 10

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

@@ -770,24 +772,24 @@ func TestInternalBug2117(t *testing.T) {

 	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	time.Sleep(time.Second * 30)

 	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	di, err = runInstance.list(t, rootFs, "test/dir1")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)

 	di, err = runInstance.list(t, rootFs, "test")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)
 }

@@ -828,7 +830,7 @@ func newRun() *run {
 	} else {
 		r.tmpUploadDir = uploadDir
 	}
-	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 	return r
 }

@@ -849,8 +851,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
-	for _, s := range config.GetRemotes() {
-		if s.Name == remote {
+	for _, s := range config.FileSections() {
+		if s == remote {
 			remoteExists = true
 		}
 	}

@@ -874,12 +876,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	cacheRemote := remote
 	if !remoteExists {
 		localRemote := remote + "-local"
-		config.FileSetValue(localRemote, "type", "local")
-		config.FileSetValue(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.GetValue(remote, "type")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil

@@ -890,14 +892,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 			m.Set("password", cryptPassword1)
 			m.Set("password2", cryptPassword2)
 		}
-		remoteRemote := config.GetValue(remote, "remote")
+		remoteRemote := config.FileGet(remote, "remote")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.GetValue(remoteWrapping, "type")
+		remoteType := config.FileGet(remoteWrapping, "type")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil

@@ -1191,7 +1193,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 	if r.rootIsCrypt {
 		denominator := int64(65536 + 16)
-		size -= 32
+		size = size - 32
 		quotient := size / denominator
 		remainder := size % denominator
 		return (quotient*65536 + remainder - 16)
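The cleanSize helper in the last hunk recovers a plaintext size from an encrypted size. A worked check of that arithmetic, assuming rclone crypt's framing of a 32-byte file header plus 16 bytes of overhead per 64 KiB block (the constants match the test, the example values are mine):

    // Recover the plaintext size from an encrypted size under crypt framing.
    package main

    import "fmt"

    func cleanSize(size int64) int64 {
    	const blockData = 65536
    	const blockOverhead = 16
    	denominator := int64(blockData + blockOverhead)
    	size -= 32 // file header
    	quotient := size / denominator
    	remainder := size % denominator
    	return quotient*blockData + remainder - blockOverhead
    }

    func main() {
    	// 100 plaintext bytes encrypt to 32 (header) + 100 + 16 (block overhead) = 148
    	fmt.Println(cleanSize(148)) // 100
    }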
3 backend/cache/cache_test.go vendored
@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

@@ -18,7 +19,7 @@ func TestIntegration(t *testing.T) {
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
 		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
+		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
 		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
 		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 	})
2 backend/cache/cache_unsupported.go vendored
@@ -2,6 +2,6 @@
 // about "no buildable Go source files "

-//go:build plan9 || js
+// +build plan9 js

 // Package cache implements a virtual provider to cache existing remotes.
 package cache
1 backend/cache/cache_upload_test.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

1 backend/cache/directory.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

13 backend/cache/handle.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

@@ -118,7 +119,7 @@ func (r *Handle) startReadWorkers() {
 	r.scaleWorkers(totalWorkers)
 }

-// scaleWorkers will increase the worker pool count by the provided amount
+// scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
 	current := r.workers
 	if current == desired {

@@ -208,7 +209,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

 	// we align the start offset of the first chunk to a likely chunk in the storage
-	chunkStart -= offset
+	chunkStart = chunkStart - offset
 	r.queueOffset(chunkStart)
 	found := false

@@ -327,7 +328,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)

@@ -415,8 +416,10 @@ func (w *worker) run() {
 				continue
 			}
 		}
-		} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-			continue
+		} else {
+			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+				continue
+			}
 		}

 		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
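The getChunk hunk aligns an absolute read offset down to its containing chunk boundary before queueing. The same rounding in isolation (the chunk size is an assumed example value, not the backend's default):

    // Round an absolute read offset down to the containing chunk boundary.
    package main

    import "fmt"

    func alignToChunk(offset, chunkSize int64) int64 {
    	return offset - offset%chunkSize
    }

    func main() {
    	const chunkSize = 5 * 1024 * 1024 // 5 MiB, an assumed example size
    	fmt.Println(alignToChunk(12_582_912, chunkSize)) // 10485760, i.e. the start of chunk #2
    }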
1 backend/cache/object.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

1 backend/cache/plex.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

1 backend/cache/storage_memory.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

1 backend/cache/storage_persistent.go vendored
@@ -1,4 +1,5 @@
 //go:build !plan9 && !js
+// +build !plan9,!js

 package cache

3 backend/cache/utils_test.go vendored
@@ -1,6 +1,3 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
-
 package cache

 import bolt "go.etcd.io/bbolt"
@@ -29,7 +29,6 @@ import (
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/lib/encoder"
 )

 // Chunker's composite files have one or more chunks

@@ -102,10 +101,8 @@ var (
 //
 // And still chunker's primary function is to chunk large files
 // rather than serve as a generic metadata container.
-const (
-	maxMetadataSize        = 1023
-	maxMetadataSizeWritten = 255
-)
+const maxMetadataSize = 1023
+const maxMetadataSizeWritten = 255

 // Current/highest supported metadata format.
 const metadataVersion = 2

@@ -308,6 +305,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		root: rpath,
 		opt:  *opt,
 	}
+	cache.PinUntilFinalized(f.base, f)
 	f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

 	if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {

@@ -319,15 +317,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// i.e. `rpath` does not exist in the wrapped remote, but chunker
 	// detects a composite file because it finds the first chunk!
 	// (yet can't satisfy fstest.CheckListing, will ignore)
-	if err == nil && !f.useMeta {
+	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-		newBase, testErr := cache.Get(ctx, baseName+firstChunkPath)
+		_, testErr := cache.Get(ctx, baseName+firstChunkPath)
 		if testErr == fs.ErrorIsFile {
-			f.base = newBase
 			err = testErr
 		}
 	}
-	cache.PinUntilFinalized(f.base, f)

 	// Correct root if definitely pointing to a file
 	if err == fs.ErrorIsFile {

@@ -963,11 +959,6 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 	}
 	if caseInsensitive {
 		sameMain = strings.EqualFold(mainRemote, remote)
-		if sameMain && f.base.Features().IsLocal {
-			// on local, make sure the EqualFold still holds true when accounting for encoding.
-			// sometimes paths with special characters will only normalize the same way in Standard Encoding.
-			sameMain = strings.EqualFold(encoder.OS.FromStandardPath(mainRemote), encoder.OS.FromStandardPath(remote))
-		}
 	} else {
 		sameMain = mainRemote == remote
 	}

@@ -981,13 +972,13 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 			}
 			continue
 		}
-		// fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
+		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
 		if err := o.addChunk(entry, chunkNo); err != nil {
 			return nil, err
 		}
 	}

-	if o.main == nil && len(o.chunks) == 0 {
+	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 		// Scanning hasn't found data chunks with conforming names.
 		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.

@@ -1143,8 +1134,8 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 // put implements Put, PutStream, PutUnchecked, Update
 func (f *Fs) put(
 	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
-	basePut putFn, action string, target fs.Object,
-) (obj fs.Object, err error) {
+	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {

 	// Perform consistency checks
 	if err := f.forbidChunk(src, remote); err != nil {
 		return nil, fmt.Errorf("%s refused: %w", action, err)

@@ -1965,7 +1956,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		return
 	}
 	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+		//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
 		if entryType == fs.EntryObject {
 			mainPath, _, _, xactID := f.parseChunkName(path)
 			metaXactID := ""
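One pair in the scanObject hunk widens `len(o.chunks) == 0` to also test `o.chunks == nil`. In Go the extra nil test is redundant, since len of a nil slice is 0; a two-line check:

    package main

    import "fmt"

    func main() {
    	var chunks []int // nil slice
    	fmt.Println(chunks == nil, len(chunks) == 0) // true true
    }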
@@ -36,7 +36,6 @@ func TestIntegration(t *testing.T) {
 			"GetTier",
 			"SetTier",
 			"Metadata",
-			"SetMetadata",
 		},
 		UnimplementableFsMethods: []string{
 			"PublicLink",
@@ -1119,17 +1119,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // SetTier performs changing storage tier of the Object if
 // multiple storage classes supported
 func (o *Object) SetTier(tier string) error {
@@ -38,7 +38,6 @@ import (
 const (
 	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
 	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
-	chunkStreams     = 0       // Streams to use for reading

 	bufferSize     = 8388608
 	heuristicBytes = 1048576

@@ -1287,17 +1286,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {

@@ -1363,7 +1351,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		}
 	}
 	// Get a chunkedreader for the wrapped object
-	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
+	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
 	// Get file handle
 	var file io.Reader
 	if offset != 0 {
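Right after obtaining the chunked reader, Open honours a nonzero offset by skipping into the stream. A loose sketch of that pattern over any sequential reader; the helper name and setup are mine, not the backend's actual code:

    // Emulate a ranged read over a reader that only supports sequential access.
    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    )

    func openAt(r io.Reader, offset int64) (io.Reader, error) {
    	// drop the first offset bytes, then hand back the rest
    	if _, err := io.CopyN(io.Discard, r, offset); err != nil {
    		return nil, err
    	}
    	return r, nil
    }

    func main() {
    	r, err := openAt(strings.NewReader("hello world"), 6)
    	if err != nil {
    		panic(err)
    	}
    	b, _ := io.ReadAll(r)
    	fmt.Println(string(b)) // world
    }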
@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	for _, runeValue := range plaintext {
 		dir += int(runeValue)
 	}
-	dir %= 256
+	dir = dir % 256

 	// We'll use this number to store in the result filename...
 	var result bytes.Buffer

@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 			if pos >= 26 {
 				pos -= 6
 			}
-			pos -= thisdir
+			pos = pos - thisdir
 			if pos < 0 {
 				pos += 52
 			}

@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
 		// Zero out the bad block and continue
 		for i := range (*fh.buf)[:n] {
-			fh.buf[i] = 0
+			(*fh.buf)[i] = 0
 		}
 	}
 	fh.bufIndex = 0
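The obfuscation "direction" computed in the first hunk is just the sum of the segment's code points reduced mod 256, later stored in the result name as the rotation amount. A worked example:

    package main

    import "fmt"

    func obfuscateDir(plaintext string) int {
    	dir := 0
    	for _, runeValue := range plaintext {
    		dir += int(runeValue)
    	}
    	return dir % 256
    }

    func main() {
    	fmt.Println(obfuscateDir("abc")) // (97+98+99) % 256 = 38
    }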
@@ -1248,17 +1248,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }

-// SetMetadata sets metadata for an Object
-//
-// It should return fs.ErrorNotImplemented if it can't set metadata
-func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
-	do, ok := o.Object.(fs.SetMetadataer)
-	if !ok {
-		return fs.ErrorNotImplemented
-	}
-	return do.SetMetadata(ctx, metadata)
-}
-
 // MimeType returns the content type of the Object if
 // known, or "" if not
 //
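The deleted SetMetadata methods across these wrapping backends all follow one shape: assert an optional interface on the wrapped object and delegate, returning fs.ErrorNotImplemented otherwise. The same optional-interface pattern in plain Go, with stand-in types in place of rclone's fs.SetMetadataer:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNotImplemented = errors.New("optional feature not implemented")

    type metadataSetter interface {
    	SetMetadata(md map[string]string) error
    }

    type wrapper struct {
    	inner any // wrapped object; may or may not support metadata
    }

    func (w *wrapper) SetMetadata(md map[string]string) error {
    	do, ok := w.inner.(metadataSetter)
    	if !ok {
    		return errNotImplemented
    	}
    	return do.SetMetadata(md)
    }

    func main() {
    	w := &wrapper{inner: struct{}{}}
    	fmt.Println(w.SetMetadata(map[string]string{"k": "v"})) // optional feature not implemented
    }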
@@ -151,7 +151,6 @@ func (rwChoices) Choices() []fs.BitsChoicesInfo {
 		{Bit: uint64(rwOff), Name: "off"},
 		{Bit: uint64(rwRead), Name: "read"},
 		{Bit: uint64(rwWrite), Name: "write"},
-		{Bit: uint64(rwFailOK), Name: "failok"},
 	}
 }

@@ -161,7 +160,6 @@ type rwChoice = fs.Bits[rwChoices]
 const (
 	rwRead rwChoice = 1 << iota
 	rwWrite
-	rwFailOK
 	rwOff rwChoice = 0
 )

@@ -175,9 +173,6 @@ var rwExamples = fs.OptionExamples{{
 }, {
 	Value: rwWrite.String(),
 	Help:  "Write the value only",
-}, {
-	Value: rwFailOK.String(),
-	Help:  "If writing fails log errors only, don't fail the transfer",
 }, {
 	Value: (rwRead | rwWrite).String(),
 	Help:  "Read and Write the value.",

@@ -1752,9 +1747,10 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
 	leaf = f.opt.Enc.FromStandardName(leaf)
 	pathID = actualID(pathID)
 	createInfo := &drive.File{
-		Name:     leaf,
-		MimeType: driveFolderType,
-		Parents:  []string{pathID},
+		Name:        leaf,
+		Description: leaf,
+		MimeType:    driveFolderType,
+		Parents:     []string{pathID},
 	}
 	var updateMetadata updateMetadataFn
 	if len(metadata) > 0 {

@@ -1923,7 +1919,7 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
 	return "", "", isDocument
 }

-// findExportFormat works out the optimum export settings
+// findExportFormatByMimeType works out the optimum export settings
 // for the given drive.File.
 //
 // Look through the exportExtensions and find the first format that can be

@@ -2219,7 +2215,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			case in <- job:
 			default:
 				overflow = append(overflow, job)
-				wg.Done()
+				wg.Add(-1)
 			}
 		}

@@ -2434,6 +2430,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 	// Define the metadata for the file we are going to create.
 	createInfo := &drive.File{
 		Name:         leaf,
+		Description:  leaf,
 		Parents:      []string{directoryID},
 		ModifiedTime: modTime.Format(timeFormatOut),
 	}

@@ -2833,7 +2830,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	// FIXME remove this when google fixes the problem!
 	if isDoc {
 		// A short sleep is needed here in order to make the
-		// change effective, without it is ignored. This is
+		// change effective, without it is is ignored. This is
 		// probably some eventual consistency nastiness.
 		sleepTime := 2 * time.Second
 		fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)

@@ -3776,7 +3773,7 @@ file named "foo ' \.txt":

 The result is a JSON array of matches, for example:

-    [
+[
     {
         "createdTime": "2017-06-29T19:58:28.537Z",
         "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",

@@ -3792,7 +3789,7 @@ The result is a JSON array of matches, for example:
     "size": "311",
     "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
     }
-    ]`,
+]`,
 }}

 // Command the backend to run a named command

@@ -3965,7 +3962,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
+	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
 	return "", nil
@@ -551,11 +551,9 @@ func (f *Fs) InternalTestQuery(t *testing.T) {

 		results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
 		require.NoError(t, err)
-		require.True(t, len(results) > 0)
-		for _, result := range results {
-			assert.True(t, len(result.Id) > 0)
-			assert.Equal(t, result.Name, item)
-		}
+		require.Len(t, results, 1)
+		assert.Len(t, results[0].Id, 33)
+		assert.Equal(t, results[0].Name, item)
 		parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
 	}
 })

@@ -566,7 +564,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)

-	opt := &filter.Options{}
+	opt := &filter.Opt{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)
@@ -9,7 +9,6 @@ import (
 	"sync"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/lib/errcount"
 	"golang.org/x/sync/errgroup"
 	drive "google.golang.org/api/drive/v3"

@@ -152,15 +151,11 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
 			return f.shouldRetry(ctx, err)
 		})
 		if err != nil {
-			fs.Errorf(f, "Failed to set permission %s for %q: %v", perm.Role, perm.EmailAddress, err)
+			fs.Errorf(f, "Failed to set permission: %v", err)
 			errs.Add(err)
 		}
 	}
-	err = errs.Err("failed to set permission")
-	if err != nil {
-		err = fserrors.NoRetryError(err)
-	}
-	return err
+	return errs.Err("failed to set permission")
 }

 // Clean attributes from permissions which we can't write

@@ -262,7 +257,7 @@ func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.La
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return fmt.Errorf("failed to set labels: %w", err)
+		return fmt.Errorf("failed to set owner: %w", err)
 	}
 	return nil
 }

@@ -372,7 +367,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	// shared drives.
 	if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
 		// Don't process permissions if there aren't any specifically set
-		fs.Debugf(o, "Ignoring %d permissions and %d permissionIds as is shared drive with hasAugmentedPermissions false", len(info.Permissions), len(info.PermissionIds))
 		info.Permissions = nil
 		info.PermissionIds = nil
 	}

@@ -554,12 +548,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Owner on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			err := f.setOwner(ctx, info, v)
-			if err != nil && f.opt.MetadataOwner.IsSet(rwFailOK) {
-				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
-				return nil
-			}
-			return err
+			return f.setOwner(ctx, info, v)
 		})
 	case "permissions":
 		if !f.opt.MetadataPermissions.IsSet(rwWrite) {

@@ -572,13 +561,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Permissions on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			err := f.setPermissions(ctx, info, perms)
-			if err != nil && f.opt.MetadataPermissions.IsSet(rwFailOK) {
-				// We've already logged the permissions errors individually here
-				fs.Debugf(f, "Ignoring error as failok is set: %v", err)
-				return nil
-			}
-			return err
+			return f.setPermissions(ctx, info, perms)
 		})
 	case "labels":
 		if !f.opt.MetadataLabels.IsSet(rwWrite) {

@@ -591,12 +574,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		// Can't set Labels on upload so need to set afterwards
 		callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
-			err := f.setLabels(ctx, info, labels)
-			if err != nil && f.opt.MetadataLabels.IsSet(rwFailOK) {
-				fs.Errorf(f, "Ignoring error as failok is set: %v", err)
-				return nil
-			}
-			return err
+			return f.setLabels(ctx, info, labels)
 		})
 	case "folder-color-rgb":
 		updateInfo.FolderColorRgb = v
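The removed callbacks all wrap a setter with the same failok guard: when the corresponding rw flag has failok set, log the error and swallow it instead of failing the transfer. A generic sketch of that guard with stand-in names:

    // Run a setter and, when failOK is configured, degrade errors to a log line.
    package main

    import (
    	"errors"
    	"fmt"
    	"log"
    )

    func guarded(set func() error, failOK bool) error {
    	err := set()
    	if err != nil && failOK {
    		log.Printf("Ignoring error as failok is set: %v", err)
    		return nil
    	}
    	return err
    }

    func main() {
    	err := guarded(func() error { return errors.New("permission denied") }, true)
    	fmt.Println(err) // <nil> - swallowed because failok is set
    }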
@@ -216,10 +216,7 @@ are supported.

 Note that we don't unmount the shared folder afterwards so the
 --dropbox-shared-folders can be omitted after the first use of a particular
-shared folder.
-
-See also --dropbox-root-namespace for an alternative way to work with shared
-folders.`,
+shared folder.`,
 			Default:  false,
 			Advanced: true,
 		}, {

@@ -240,11 +237,6 @@ folders.`,
 				encoder.EncodeDel |
 				encoder.EncodeRightSpace |
 				encoder.EncodeInvalidUtf8,
-		}, {
-			Name:     "root_namespace",
-			Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
-			Default:  "",
-			Advanced: true,
 		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
 	})
 }

@@ -261,7 +253,6 @@ type Options struct {
 	AsyncBatch    bool                 `config:"async_batch"`
 	PacerMinSleep fs.Duration          `config:"pacer_min_sleep"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
-	RootNsid      string               `config:"root_namespace"`
 }

 // Fs represents a remote dropbox server

@@ -386,7 +377,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	oldToken = strings.TrimSpace(oldToken)
 	if ok && oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
-		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
+		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, fmt.Errorf("NewFS convert token: %w", err)

@@ -511,11 +502,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	f.features.Fill(ctx, f)

-	if f.opt.RootNsid != "" {
-		f.ns = f.opt.RootNsid
-		fs.Debugf(f, "Overriding root namespace to %q", f.ns)
-	} else if strings.HasPrefix(root, "/") {
-		// If root starts with / then use the actual root
+	// If root starts with / then use the actual root
+	if strings.HasPrefix(root, "/") {
 		var acc *users.FullAccount
 		err = f.pacer.Call(func() (bool, error) {
 			acc, err = f.users.GetCurrentAccount()

@@ -656,7 +644,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(ctx, remote, nil)
 }

-// listSharedFolders lists all available shared folders mounted and not mounted
+// listSharedFoldersApi lists all available shared folders mounted and not mounted
 // we'll need the id later so we have to return them in original format
 func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
 	started := false
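The token-conversion pair above swaps %q for a hand-quoted "%s". The difference matters if the legacy token ever contains a quote or backslash: %q escapes it, while the %s form produces invalid JSON. A quick demonstration:

    package main

    import "fmt"

    func main() {
    	token := `abc"def` // pathological legacy token
    	fmt.Printf(`{"access_token":%q}`+"\n", token)   // {"access_token":"abc\"def"} - valid JSON
    	fmt.Printf(`{"access_token":"%s"}`+"\n", token) // {"access_token":"abc"def"} - broken JSON
    }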
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374, 412: // Flood detected seems to be #412 now
+	case 374:
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	srcFs := srcObj.fs

 	// Find current directory ID
-	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
+	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		return nil, err
 	}

 	// Create temporary object
-	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}

 	// If it is in the correct directory, just rename it
 	var url string
-	if srcDirectoryID == dstDirectoryID {
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			return src, nil
-		}
-		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
+	if currentDirectoryID == directoryID {
+		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}

@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		dstFolderID, err := strconv.Atoi(dstDirectoryID)
+		folderID, err := strconv.Atoi(directoryID)
 		if err != nil {
 			return nil, err
 		}
-		rename := dstLeaf
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			rename = ""
-		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
+		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
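The base branch's Move distinguishes three cases that the older code collapses: same directory and same leaf (no-op), same directory (rename only), and different directory (move, renaming only if the leaf changed). A compact sketch of that decision table with stand-in action strings instead of the fichier API calls:

    package main

    import "fmt"

    func planMove(srcDir, dstDir, srcLeaf, dstLeaf string) string {
    	if srcDir == dstDir {
    		if srcLeaf == dstLeaf {
    			return "no-op" // already in place
    		}
    		return "rename to " + dstLeaf
    	}
    	rename := dstLeaf
    	if srcLeaf == dstLeaf {
    		rename = "" // move keeps the name
    	}
    	return fmt.Sprintf("move to %s (rename %q)", dstDir, rename)
    }

    func main() {
    	fmt.Println(planMove("d1", "d1", "a.txt", "a.txt")) // no-op
    	fmt.Println(planMove("d1", "d1", "a.txt", "b.txt")) // rename to b.txt
    	fmt.Println(planMove("d1", "d2", "a.txt", "a.txt")) // move to d2 (rename "")
    }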
@@ -1,901 +0,0 @@
-// Package filescom provides an interface to the Files.com
-// object storage system.
-package filescom
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-	"time"
-
-	files_sdk "github.com/Files-com/files-sdk-go/v3"
-	"github.com/Files-com/files-sdk-go/v3/bundle"
-	"github.com/Files-com/files-sdk-go/v3/file"
-	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
-	"github.com/Files-com/files-sdk-go/v3/folder"
-	"github.com/Files-com/files-sdk-go/v3/session"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/pacer"
-)
-
-/*
-Run of rclone info
-stringNeedsEscaping = []rune{
-	'/', '\x00'
-}
-maxFileLength = 512 // for 1 byte unicode characters
-maxFileLength = 512 // for 2 byte unicode characters
-maxFileLength = 512 // for 3 byte unicode characters
-maxFileLength = 512 // for 4 byte unicode characters
-canWriteUnnormalized = true
-canReadUnnormalized  = true
-canReadRenormalized  = true
-canStream = true
-*/
-
-const (
-	minSleep      = 10 * time.Millisecond
-	maxSleep      = 2 * time.Second
-	decayConstant = 2 // bigger for slower decay, exponential
-
-	folderNotEmpty = "processing-failure/folder-not-empty"
-)
-
-// Register with Fs
-func init() {
-	fs.Register(&fs.RegInfo{
-		Name:        "filescom",
-		Description: "Files.com",
-		NewFs:       NewFs,
-		Options: []fs.Option{
-			{
-				Name: "site",
-				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
-			}, {
-				Name: "username",
-				Help: "The username used to authenticate with Files.com.",
-			}, {
-				Name:       "password",
-				Help:       "The password used to authenticate with Files.com.",
-				IsPassword: true,
-			}, {
-				Name:      "api_key",
-				Help:      "The API key used to authenticate with Files.com.",
-				Advanced:  true,
-				Sensitive: true,
-			}, {
-				Name:     config.ConfigEncoding,
-				Help:     config.ConfigEncodingHelp,
-				Advanced: true,
-				Default: (encoder.Display |
-					encoder.EncodeBackSlash |
-					encoder.EncodeRightSpace |
-					encoder.EncodeRightCrLfHtVt |
-					encoder.EncodeInvalidUtf8),
-			}},
-	})
-}
-
-// Options defines the configuration for this backend
-type Options struct {
-	Site     string               `config:"site"`
-	Username string               `config:"username"`
-	Password string               `config:"password"`
-	APIKey   string               `config:"api_key"`
-	Enc      encoder.MultiEncoder `config:"encoding"`
-}
-
-// Fs represents a remote files.com server
-type Fs struct {
-	name            string                 // name of this remote
-	root            string                 // the path we are working on
-	opt             Options                // parsed options
-	features        *fs.Features           // optional features
-	fileClient      *file.Client           // the connection to the file API
-	folderClient    *folder.Client         // the connection to the folder API
-	migrationClient *file_migration.Client // the connection to the file migration API
-	bundleClient    *bundle.Client         // the connection to the bundle API
-	pacer           *fs.Pacer              // pacer for API calls
-}
-
-// Object describes a files object
-//
-// Will definitely have info but maybe not meta
-type Object struct {
-	fs       *Fs       // what this object is part of
-	remote   string    // The remote path
-	size     int64     // size of the object
-	crc32    string    // CRC32 of the object content
-	md5      string    // MD5 of the object content
-	mimeType string    // Content-Type of the object
-	modTime  time.Time // modification time of the object
-}
-
-// ------------------------------------------------------------
-
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
-	return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
-	return f.root
-}
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
-	return fmt.Sprintf("files root '%s'", f.root)
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-	return f.features
-}
-
-// Encode remote and turn it into an absolute path in the share
-func (f *Fs) absPath(remote string) string {
-	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
-}
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
-	429, // Too Many Requests.
-	500, // Internal Server Error
-	502, // Bad Gateway
-	503, // Service Unavailable
-	504, // Gateway Timeout
-	509, // Bandwidth Limit Exceeded
-}
-
-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
-
-	if apiErr, ok := err.(files_sdk.ResponseError); ok {
-		for _, e := range retryErrorCodes {
-			if apiErr.HttpCode == e {
-				fs.Debugf(nil, "Retrying API error %v", err)
-				return true, err
-			}
-		}
-	}
-
-	return fserrors.ShouldRetry(err), err
-}
-
-// readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
-	params := files_sdk.FileFindParams{
-		Path: f.absPath(path),
-	}
-
-	var file files_sdk.File
-	err = f.pacer.Call(func() (bool, error) {
-		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return &file, nil
-}
-
-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	// Parse config into Options struct
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-
-	root = strings.Trim(root, "/")
-
-	config, err := newClientConfig(ctx, opt)
-	if err != nil {
-		return nil, err
-	}
-
-	f := &Fs{
-		name:            name,
-		root:            root,
-		opt:             *opt,
-		fileClient:      &file.Client{Config: config},
-		folderClient:    &folder.Client{Config: config},
-		migrationClient: &file_migration.Client{Config: config},
-		bundleClient:    &bundle.Client{Config: config},
-		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-	}
-	f.features = (&fs.Features{
-		CaseInsensitive:          true,
-		CanHaveEmptyDirectories:  true,
-		ReadMimeType:             true,
-		DirModTimeUpdatesOnWrite: true,
-	}).Fill(ctx, f)
-
-	if f.root != "" {
-		info, err := f.readMetaDataForPath(ctx, "")
-		if err == nil && !info.IsDir() {
-			f.root = path.Dir(f.root)
-			if f.root == "." {
-				f.root = ""
-			}
-			return f, fs.ErrorIsFile
-		}
-	}
-
-	return f, err
-}
-
-func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
-	if opt.Site != "" {
-		if strings.Contains(opt.Site, ".") {
-			config.EndpointOverride = opt.Site
-		} else {
-			config.Subdomain = opt.Site
-		}
-
-		_, err = url.ParseRequestURI(config.Endpoint())
-		if err != nil {
-			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
-			return
-		}
-	}
-
-	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
-
-	if opt.APIKey != "" {
-		config.APIKey = opt.APIKey
-		return
-	}
-
-	if opt.Username == "" {
-		err = errors.New("username not found")
-		return
-	}
-	if opt.Password == "" {
-		err = errors.New("password not found")
-		return
-	}
-	opt.Password, err = obscure.Reveal(opt.Password)
-	if err != nil {
-		return
-	}
-
-	sessionClient := session.Client{Config: config}
-	params := files_sdk.SessionCreateParams{
-		Username: opt.Username,
-		Password: opt.Password,
-	}
-
-	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
-	if err != nil {
-		err = fmt.Errorf("couldn't create session: %w", err)
-		return
-	}
-
-	config.SessionId = thisSession.Id
-	return
-}
-
-// Return an Object from a path
-//
-// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
-	o := &Object{
-		fs:     f,
-		remote: remote,
-	}
-	var err error
-	if file != nil {
-		err = o.setMetaData(file)
-	} else {
-		err = o.readMetaData(ctx) // reads info and meta, returning an error
-	}
-	if err != nil {
-		return nil, err
-	}
-	return o, nil
-}
-
-// NewObject finds the Object at remote. If it can't be found
-// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(ctx, remote, nil)
-}
-
-// List the objects and directories in dir into entries. The
-// entries can be returned in any order but should be for a
-// complete directory.
-//
-// dir should be "" to list the root, and should not have
-// trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	var it *folder.Iter
-	params := files_sdk.FolderListForParams{
-		Path: f.absPath(dir),
-	}
-
-	err = f.pacer.Call(func() (bool, error) {
-		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, fmt.Errorf("couldn't list files: %w", err)
-	}
-
-	for it.Next() {
-		item := ptr(it.File())
-		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
-		remote = path.Join(dir, remote)
-		if remote == dir {
-			continue
-		}
-
-		if item.IsDir() {
-			d := fs.NewDir(remote, item.ModTime())
-			entries = append(entries, d)
-		} else {
-			o, err := f.newObjectWithInfo(ctx, remote, item)
-			if err != nil {
-				return nil, err
-			}
-			entries = append(entries, o)
-		}
-	}
-	err = it.Err()
-	if files_sdk.IsNotExist(err) {
-		return nil, fs.ErrorDirNotFound
-	}
-	return
-}
-
-// Creates from the parameters passed in a half finished Object which
-// must have setMetaData called on it
-//
-// Returns the object and error.
-//
-// Used to create new objects
-func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
-	// Create the directory for the object if it doesn't exist
-	err = f.mkParentDir(ctx, remote)
-	if err != nil {
-		return
-	}
-	// Temporary Object under construction
-	o = &Object{
-		fs:     f,
-		remote: remote,
-	}
-	return o, nil
-}
-
-// Put the object
-//
-// Copy the reader in to the new object which is returned.
-//
-// The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	// Temporary Object under construction
-	fs := &Object{
-		fs:     f,
-		remote: src.Remote(),
-	}
-	return fs, fs.Update(ctx, in, src, options...)
-}
-
-// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
-}
-
-func (f *Fs) mkdir(ctx context.Context, path string) error {
-	if path == "" || path == "." {
-		return nil
-	}
-
-	params := files_sdk.FolderCreateParams{
-		Path:         path,
-		MkdirParents: ptr(true),
-	}
-
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if files_sdk.IsExist(err) {
-		return nil
-	}
-	return err
-}
-
-// Make the parent directory of remote
-func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
-	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
-}
-
-// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	return f.mkdir(ctx, f.absPath(dir))
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	o := Object{
-		fs:     f,
-		remote: dir,
-	}
-	return o.SetModTime(ctx, modTime)
-}
-
-// purgeCheck removes the root directory, if check is set then it
-// refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
-	path := f.absPath(dir)
-	if path == "" || path == "." {
-		return errors.New("can't purge root directory")
-	}
-
-	params := files_sdk.FileDeleteParams{
-		Path:      path,
-		Recursive: ptr(!check),
-	}
-
-	err := f.pacer.Call(func() (bool, error) {
-		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
-		// Allow for eventual consistency deletion of child objects.
-		if isFolderNotEmpty(err) {
-			return true, err
-		}
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		if files_sdk.IsNotExist(err) {
-			return fs.ErrorDirNotFound
-		} else if isFolderNotEmpty(err) {
-			return fs.ErrorDirectoryNotEmpty
-		}
-
-		return fmt.Errorf("rmdir failed: %w", err)
-	}
-	return nil
-}
-
-// Rmdir deletes the root folder
-//
-// Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, true)
-}
-
-// Precision return the precision of this Fs
-func (f *Fs) Precision() time.Duration {
-	return time.Second
-}
-
-// Copy src to this remote using server-side copy operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
-	srcObj, ok := src.(*Object)
-	if !ok {
-		fs.Debugf(src, "Can't copy - not same remote type")
-		return nil, fs.ErrorCantCopy
-	}
-	err = srcObj.readMetaData(ctx)
-	if err != nil {
-		return
-	}
-
-	srcPath := srcObj.fs.absPath(srcObj.remote)
-	dstPath := f.absPath(remote)
-	if strings.EqualFold(srcPath, dstPath) {
-		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
-	}
-
-	// Create temporary object
-	dstObj, err = f.createObject(ctx, remote)
-	if err != nil {
-		return
-	}
-
-	// Copy the object
-	params := files_sdk.FileCopyParams{
-		Path:        srcPath,
-		Destination: dstPath,
-		Overwrite:   ptr(true),
-	}
-
-	var action files_sdk.FileAction
-	err = f.pacer.Call(func() (bool, error) {
-		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return
-	}
-
-	err = f.waitForAction(ctx, action, "copy")
-	if err != nil {
-		return
-	}
-
-	err = dstObj.SetModTime(ctx, srcObj.modTime)
-	return
-}
-
-// Purge deletes all the files and the container
-//
-// Optional interface: Only implement this if you have a way of
-// deleting all the files quicker than just running Remove() on the
-// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, false)
-}
-
-// move a file or folder
-func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
-	// Move the object
-	params := files_sdk.FileMoveParams{
-		Path:        src.absPath(srcRemote),
-		Destination: f.absPath(dstRemote),
-	}
-
-	var action files_sdk.FileAction
-	err = f.pacer.Call(func() (bool, error) {
-		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.waitForAction(ctx, action, "move")
-	if err != nil {
-		return nil, err
-	}
-
-	info, err = f.readMetaDataForPath(ctx, dstRemote)
-	return
-}
-
-func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
-	var migration files_sdk.FileMigration
-	err = f.pacer.Call(func() (bool, error) {
-		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
-			// noop
-		}, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err == nil && migration.Status != "completed" {
-		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
-	}
-	return
-}
-
-// Move src to this remote using server-side move operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*Object)
-	if !ok {
-		fs.Debugf(src, "Can't move - not same remote type")
-		return nil, fs.ErrorCantMove
-	}
-
-	// Create temporary object
-	dstObj, err := f.createObject(ctx, remote)
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dstObj.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Check if destination exists
|
||||
_, err = f.readMetaDataForPath(ctx, dstRemote)
|
||||
if err == nil {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, err := f.createObject(ctx, dstRemote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Do the move
|
||||
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
|
||||
return
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
|
||||
params := files_sdk.BundleCreateParams{
|
||||
Paths: []string{f.absPath(remote)},
|
||||
}
|
||||
if expire < fs.DurationOff {
|
||||
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
|
||||
}
|
||||
|
||||
var bundle files_sdk.Bundle
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
url = bundle.Url
|
||||
return
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet(hash.CRC32, hash.MD5)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
switch t {
|
||||
case hash.CRC32:
|
||||
if o.crc32 == "" {
|
||||
return "", nil
|
||||
}
|
||||
return fmt.Sprintf("%08s", o.crc32), nil
|
||||
case hash.MD5:
|
||||
return o.md5, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(file *files_sdk.File) error {
|
||||
o.modTime = file.ModTime()
|
||||
|
||||
if !file.IsDir() {
|
||||
o.size = file.Size
|
||||
o.crc32 = file.Crc32
|
||||
o.md5 = file.Md5
|
||||
o.mimeType = file.MimeType
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
||||
if err != nil {
|
||||
if files_sdk.IsNotExist(err) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
if file.IsDir() {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
return o.setMetaData(file)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||
params := files_sdk.FileUpdateParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
ProvidedMtime: &modTime,
|
||||
}
|
||||
|
||||
var file files_sdk.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(&file)
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset, count int64
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
params := files_sdk.FileDownloadParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.fileClient.Download(
|
||||
params,
|
||||
files_sdk.WithContext(ctx),
|
||||
files_sdk.RequestHeadersOption(headers),
|
||||
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
||||
in = closer
|
||||
return err
|
||||
}),
|
||||
)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a pointer to t - useful for returning pointers to constants
|
||||
func ptr[T any](t T) *T {
|
||||
return &t
|
||||
}
|
||||
|
||||
func isFolderNotEmpty(err error) bool {
|
||||
var re files_sdk.ResponseError
|
||||
ok := errors.As(err, &re)
|
||||
return ok && re.Type == folderNotEmpty
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
//
|
||||
// The new object may have been created if an error is returned.
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
uploadOpts := []file.UploadOption{
|
||||
file.UploadWithContext(ctx),
|
||||
file.UploadWithReader(in),
|
||||
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
||||
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Upload(uploadOpts...)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := files_sdk.FileDeleteParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
|
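A note on two idioms used throughout the filescom backend above: every files_sdk call is wrapped in f.pacer.Call so transient and rate-limit errors are retried with backoff, and the generic ptr helper supplies the pointer-typed fields the SDK params expect. A minimal sketch of the combined pattern, reusing names from the code above (the path value is hypothetical, for illustration only):

	// Sketch: create a folder with parents, retrying transient errors.
	params := files_sdk.FolderCreateParams{
		Path:         "docs/reports", // hypothetical path
		MkdirParents: ptr(true),      // ptr turns the literal into a *bool
	}
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err) // (retry?, err), as defined elsewhere in this backend
	})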
@ -1,17 +0,0 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
File diff suppressed because it is too large
@ -1,16 +0,0 @@
package frostfs

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFrostFS:",
		NilObject:       (*Object)(nil),
		SkipInvalidUTF8: true,
	})
}
@ -1,326 +0,0 @@
package frostfs

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	resolver "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/wallet"

	"github.com/rclone/rclone/fs"
)

type endpointInfo struct {
	Address  string
	Priority int
	Weight   float64
}

func publicReadWriteCCPRules() []chain.Rule {
	return []chain.Rule{
		{
			Status: chain.Allow, Actions: chain.Actions{
				Inverted: false,
				Names: []string{
					native.MethodPutObject,
					native.MethodGetObject,
					native.MethodHeadObject,
					native.MethodDeleteObject,
					native.MethodSearchObject,
					native.MethodRangeObject,
					native.MethodHashObject,
					native.MethodPatchObject,
				},
			}, Resources: chain.Resources{
				Inverted: false,
				Names:    []string{native.ResourceFormatRootObjects},
			}, Any: false},
	}
}

func privateCCPRules() []chain.Rule {
	rule := publicReadWriteCCPRules()
	// The same as public-read-write, except that only the owner is allowed to perform the listed actions
	rule[0].Condition = []chain.Condition{
		{
			Op:    chain.CondStringEquals,
			Kind:  chain.KindRequest,
			Key:   native.PropertyKeyActorRole,
			Value: native.PropertyValueContainerRoleOwner,
		},
	}
	return rule
}

func publicReadCCPRules() []chain.Rule {
	rule := privateCCPRules()
	// Add a rule that allows other users to perform reading actions.
	rule = append(rule, chain.Rule{
		Status: chain.Allow, Actions: chain.Actions{
			Inverted: false,
			Names: []string{
				native.MethodGetObject,
				native.MethodHeadObject,
				native.MethodRangeObject,
				native.MethodHashObject,
				native.MethodSearchObject,
			},
		}, Resources: chain.Resources{
			Inverted: false,
			Names:    []string{native.ResourceFormatRootObjects},
		}, Condition: []chain.Condition{
			{
				Op:    chain.CondStringEquals,
				Kind:  chain.KindRequest,
				Key:   native.PropertyKeyActorRole,
				Value: native.PropertyValueContainerRoleOthers,
			},
		}, Any: false})
	return rule
}

func parseContainerCreationPolicyString(policyString string) ([]chain.Rule, error) {
	switch policyString {
	case "private":
		return privateCCPRules(), nil
	case "public-read":
		return publicReadCCPRules(), nil
	case "public-read-write":
		return publicReadWriteCCPRules(), nil
	}
	return nil, fmt.Errorf("invalid container creation policy: %s", policyString)
}

func parseEndpoints(endpointParam string) ([]endpointInfo, error) {
	var err error
	expectedLength := -1 // to make sure all endpoints have the same format

	endpoints := strings.Split(strings.TrimSpace(endpointParam), " ")
	res := make([]endpointInfo, 0, len(endpoints))
	seen := make(map[string]struct{}, len(endpoints))

	for _, endpoint := range endpoints {
		endpointInfoSplit := strings.Split(endpoint, ",")
		address := endpointInfoSplit[0]

		if len(address) == 0 {
			continue
		}
		if _, ok := seen[address]; ok {
			return nil, fmt.Errorf("endpoint '%s' is already defined", address)
		}
		seen[address] = struct{}{}

		epInfo := endpointInfo{
			Address:  address,
			Priority: 1,
			Weight:   1,
		}

		if expectedLength == -1 {
			expectedLength = len(endpointInfoSplit)
		}

		if len(endpointInfoSplit) != expectedLength {
			return nil, fmt.Errorf("all endpoints must have the same format: '%s'", endpointParam)
		}

		switch len(endpointInfoSplit) {
		case 1:
		case 2:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		case 3:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}

			epInfo.Weight, err = parseWeight(endpointInfoSplit[2])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		default:
			return nil, fmt.Errorf("invalid endpoint format '%s'", endpoint)
		}

		res = append(res, epInfo)
	}

	return res, nil
}

func parsePriority(priorityStr string) (int, error) {
	priority, err := strconv.Atoi(priorityStr)
	if err != nil {
		return 0, fmt.Errorf("invalid priority '%s': %w", priorityStr, err)
	}
	if priority <= 0 {
		return 0, fmt.Errorf("priority must be positive '%s'", priorityStr)
	}

	return priority, nil
}

func parseWeight(weightStr string) (float64, error) {
	weight, err := strconv.ParseFloat(weightStr, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid weight '%s': %w", weightStr, err)
	}
	if weight <= 0 {
		return 0, fmt.Errorf("weight must be positive '%s'", weightStr)
	}

	return weight, nil
}

func createPool(ctx context.Context, key *keys.PrivateKey, cfg *Options) (*pool.Pool, error) {
	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	prm.SetNodeDialTimeout(time.Duration(cfg.FrostfsConnectionTimeout))
	prm.SetHealthcheckTimeout(time.Duration(cfg.FrostfsRequestTimeout))
	prm.SetClientRebalanceInterval(time.Duration(cfg.FrostfsRebalanceInterval))
	prm.SetSessionExpirationDuration(cfg.FrostfsSessionExpiration)

	nodes, err := getNodePoolParams(cfg.FrostfsEndpoint)
	if err != nil {
		return nil, err
	}
	for _, node := range nodes {
		prm.AddNode(node)
	}

	p, err := pool.NewPool(prm)
	if err != nil {
		return nil, fmt.Errorf("create pool: %w", err)
	}

	if err = p.Dial(ctx); err != nil {
		return nil, fmt.Errorf("dial pool: %w", err)
	}

	return p, nil
}

func getNodePoolParams(endpointParam string) ([]pool.NodeParam, error) {
	endpointInfos, err := parseEndpoints(endpointParam)
	if err != nil {
		return nil, fmt.Errorf("parse endpoints params: %w", err)
	}

	res := make([]pool.NodeParam, len(endpointInfos))
	for i, info := range endpointInfos {
		res[i] = pool.NewNodeParam(info.Priority, info.Address, info.Weight)
	}

	return res, nil
}

func createNNSResolver(cfg *Options) (*resolver.NNS, error) {
	if cfg.RPCEndpoint == "" {
		return nil, nil
	}

	var nns resolver.NNS
	if err := nns.Dial(cfg.RPCEndpoint); err != nil {
		return nil, fmt.Errorf("dial NNS resolver: %w", err)
	}

	return &nns, nil
}

func getAccount(cfg *Options) (*wallet.Account, error) {
	w, err := wallet.NewWalletFromFile(cfg.Wallet)
	if err != nil {
		return nil, err
	}

	addr := w.GetChangeAddress()
	if cfg.Address != "" {
		addr, err = flags.ParseAddress(cfg.Address)
		if err != nil {
			return nil, fmt.Errorf("invalid address")
		}
	}
	acc := w.GetAccount(addr)
	err = acc.Decrypt(cfg.Password, w.Scrypt)
	if err != nil {
		return nil, err
	}

	return acc, nil
}

func newAddress(cnrID cid.ID, objID oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnrID)
	addr.SetObject(objID)
	return addr
}

func formObject(own *user.ID, cnrID cid.ID, name string, header map[string]string) *object.Object {
	attributes := make([]object.Attribute, 0, 1+len(header))
	filename := object.NewAttribute()
	filename.SetKey(object.AttributeFileName)
	filename.SetValue(name)

	attributes = append(attributes, *filename)

	for key, val := range header {
		attr := object.NewAttribute()
		attr.SetKey(key)
		attr.SetValue(val)
		attributes = append(attributes, *attr)
	}

	obj := object.New()
	obj.SetOwnerID(*own)
	obj.SetContainerID(cnrID)
	obj.SetAttributes(attributes...)

	return obj
}

func newDir(cnrID cid.ID, cnr container.Container, defaultZone string) *fs.Dir {
	remote := cnrID.EncodeToString()
	timestamp := container.CreatedAt(cnr)

	if domain := container.ReadDomain(cnr); domain.Name() != "" {
		if defaultZone != domain.Zone() {
			remote = domain.Name() + "." + domain.Zone()
		} else {
			remote = domain.Name()
		}
	}

	dir := fs.NewDir(remote, timestamp)
	dir.SetID(cnrID.String())
	return dir
}

func getContainerNameAndZone(containerStr, defaultZone string) (cnrName string, cnrZone string) {
	defer func() {
		if len(cnrZone) == 0 {
			cnrZone = defaultZone
		}
	}()
	if idx := strings.Index(containerStr, "."); idx >= 0 {
		return containerStr[:idx], containerStr[idx+1:]
	}
	return containerStr, defaultZone
}
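For readers of parseEndpoints above: the accepted grammar is space-separated entries of the form address[,priority[,weight]], and every entry must use the same number of fields. A hedged example of what a well-formed parameter parses to (values taken from the tests that follow):

	// "address,priority,weight" entries, all with the same arity
	infos, err := parseEndpoints("s01.frostfs.devenv:8080,2,3")
	// on success:
	// infos[0] == endpointInfo{Address: "s01.frostfs.devenv:8080", Priority: 2, Weight: 3}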
@ -1,205 +0,0 @@
package frostfs

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetZoneAndContainerNames(t *testing.T) {
	for i, tc := range []struct {
		cnrStr       string
		defZone      string
		expectedName string
		expectedZone string
	}{
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name.",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		}, {
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actualName, actualZone := getContainerNameAndZone(tc.cnrStr, tc.defZone)
			require.Equal(t, tc.expectedZone, actualZone)
			require.Equal(t, tc.expectedName, actualName)
		})
	}
}

func TestParseContainerCreationPolicy(t *testing.T) {
	for i, tc := range []struct {
		ACLString     string
		ExpectedError bool
	}{
		{
			ACLString:     "",
			ExpectedError: true,
		},
		{
			ACLString:     "public-ready",
			ExpectedError: true,
		},
		{
			ACLString:     "public-read",
			ExpectedError: false,
		},
		{
			ACLString:     "public-read-write",
			ExpectedError: false,
		},
		{
			ACLString:     "private",
			ExpectedError: false,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			rules, err := parseContainerCreationPolicyString(tc.ACLString)
			if tc.ExpectedError {
				require.Error(t, err)
				require.Nil(t, rules)
			} else {
				require.NoError(t, err)
				require.NotNil(t, rules)
			}
		})
	}
}

func TestParseEndpoints(t *testing.T) {
	for i, tc := range []struct {
		EndpointsParam string
		ExpectedError  bool
		ExpectedResult []endpointInfo
	}{
		{
			EndpointsParam: "s01.frostfs.devenv:8080",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 1,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2,3",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   3,
			}},
		},
		{
			EndpointsParam: " s01.frostfs.devenv:8080 s02.frostfs.devenv:8080 ",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 2,
					Weight:   1,
				},
				{
					Address:  "s03.frostfs.devenv:8080",
					Priority: 2,
					Weight:   9,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,-1,1",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,,",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,sd,sd",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,0",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080,1",
			ExpectedError:  true,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			res, err := parseEndpoints(tc.EndpointsParam)
			if tc.ExpectedError {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.ExpectedResult, res)
		})
	}
}
@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
			Default: false,
		}, {
			Name: "concurrency",
			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`"),
`, "|", "`", -1),
			Default:  0,
			Advanced: true,
		}, {
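The two Help variants in the ftp hunk above are behaviorally identical: strings.ReplaceAll(s, old, new) is defined in the standard library as strings.Replace(s, old, new, -1), where n = -1 means replace every occurrence. The change only swaps the newer convenience wrapper for the long form.

	// Equivalent by definition in the strings package:
	a := strings.Replace("use |flag|", "|", "`", -1)
	b := strings.ReplaceAll("use |flag|", "|", "`")
	// a == b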
@ -1,311 +0,0 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Error is returned from gofile when things go wrong
type Error struct {
	Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Status)
	return out
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err returns err if not nil, or e if IsError, or nil
func (e Error) Err(err error) error {
	if err != nil {
		return err
	}
	if e.IsError() {
		return e
	}
	return nil
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"`
	ModTime       int64                  `json:"modTime"`
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is returned from DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define:
// For Attribute "name"        : The name of the content (file or folder)
// For Attribute "description" : The description displayed on the download page (folder only)
// For Attribute "tags"        : A comma-separated list of tags (folder only)
// For Attribute "public"      : either true or false (folder only)
// For Attribute "expiry"      : A unix timestamp of the expiration date (folder only)
// For Attribute "password"    : The password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
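One pattern worth noting in the gofile types above: every response struct embeds Error, so a caller can funnel both transport and API-level failures through a single check. A hedged usage sketch (the HTTP call and decode step are elided):

	var resp api.CreateFolderResponse
	// ... HTTP call and JSON decode into resp ...
	if err := resp.Err(nil); err != nil {
		// err is the embedded api.Error whenever resp.Status != "ok"
	}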
File diff suppressed because it is too large
@ -1,17 +0,0 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
@ -697,7 +697,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
		// is this a directory marker?
		if isDirectory {
			// Don't insert the root directory
			if remote == f.opt.Enc.ToStandardPath(directory) {
			if remote == directory {
				continue
			}
			// process directory markers as directories
@ -620,7 +620,9 @@ func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter
		if err != nil {
			return err
		}
		entries = append(entries, entry)
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
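For context on the listDir change above: fs.DirEntries holds interface values, so appending a nil entry creates a phantom list element that later consumers trip over; the nil guard on the new side prevents that. A small illustration of the underlying Go behavior:

	var entries fs.DirEntries
	var entry fs.DirEntry // nil interface value
	entries = append(entries, entry)
	// len(entries) == 1 and entries[0] == nil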
@ -38,7 +38,7 @@ type dirPattern struct {
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
// dirPatters is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
@ -535,17 +535,6 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return do.Metadata(ctx)
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
	do, ok := o.Object.(fs.SetMetadataer)
	if !ok {
		return fs.ErrorNotImplemented
	}
	return do.SetMetadata(ctx, metadata)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@ -1,4 +1,5 @@
//go:build !plan9
// +build !plan9

package hdfs
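Several hunks in this area re-add the legacy // +build line next to the //go:build constraint. Since Go 1.17 the //go:build form is authoritative, but toolchains older than 1.17 only understand the old comment, so files carrying both must keep them in agreement (gofmt keeps them in sync). The paired form looks like:

	//go:build !plan9
	// +build !plan9

	package hdfs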
@ -1,4 +1,5 @@
//go:build !plan9
// +build !plan9

// Package hdfs provides an interface to the HDFS storage system.
package hdfs
@ -1,6 +1,7 @@
// Test HDFS filesystem interface

//go:build !plan9
// +build !plan9

package hdfs_test
@ -2,6 +2,6 @@
// about "no buildable Go source files "

//go:build plan9
// +build plan9

// Package hdfs provides an interface to the HDFS storage system.
package hdfs
@ -1,4 +1,5 @@
//go:build !plan9
// +build !plan9

package hdfs
@ -89,10 +89,6 @@ that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:    "no_escape",
			Help:    "Do not escape URL metacharacters in path names.",
			Default: false,
		}},
	}
	fs.Register(fsi)
@ -104,7 +100,6 @@ type Options struct {
	NoSlash  bool            `config:"no_slash"`
	NoHead   bool            `config:"no_head"`
	Headers  fs.CommaSepList `config:"headers"`
	NoEscape bool            `config:"no_escape"`
}

// Fs stores the interface to the remote HTTP files
@ -331,11 +326,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Joins the remote onto the base URL
func (f *Fs) url(remote string) string {
	if f.opt.NoEscape {
		// Directly concatenate without escaping, no_escape behavior
		return f.endpointURL + remote
	}
	// Default behavior
	return f.endpointURL + rest.URLPathEscape(remote)
}
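To make the url() branches above concrete: with no_escape the remote is concatenated verbatim, while the default path percent-escapes URL metacharacters. Using the standard library for illustration (rest.URLPathEscape is rclone's segment-aware helper built on the same idea):

	// net/url example of what escaping does to metacharacters:
	url.PathEscape("a b#c") // "a%20b%23c"
	// with no_escape, "a b#c" would be appended to the endpoint unchanged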
@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
	var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
	var path = strings.Replace(resultURL, endpoint, "", 1)

	path += expires
	path = path + expires
	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
	mac.Write([]byte(path))
	signature := hex.EncodeToString(mac.Sum(nil))
@ -1487,38 +1487,16 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fs.ErrorCantMove
	}

	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}

	if err := f.mkParentDir(ctx, remote); err != nil {
		return nil, err
	}
	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

	if err == nil {
		var createTime time.Time
		var createTimeMeta bool
		var modTime time.Time
		var modTimeMeta bool
		if meta != nil {
			createTime, createTimeMeta = srcObj.parseFsMetadataTime(meta, "btime")
			if !createTimeMeta {
				createTime = srcObj.createTime
			}
			modTime, modTimeMeta = srcObj.parseFsMetadataTime(meta, "mtime")
			if !modTimeMeta {
				modTime = srcObj.modTime
			}
		}
		if bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
			// Workaround necessary when destination was a trashed file, to avoid the copied file also being in trash (bug in api?)
			fs.Debugf(src, "Server-side copied to trashed destination, restoring")
			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
		} else if createTimeMeta || modTimeMeta {
			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
		}
	}
	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
		info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
	}

	if err != nil {
@ -1545,30 +1523,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
		return nil, fs.ErrorCantMove
	}

	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, err
	}

	if err := f.mkParentDir(ctx, remote); err != nil {
		return nil, err
	}
	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

	if err == nil && meta != nil {
		createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
		if !createTimeMeta {
			createTime = srcObj.createTime
		}
		modTime, modTimeMeta := srcObj.parseFsMetadataTime(meta, "mtime")
		if !modTimeMeta {
			modTime = srcObj.modTime
		}
		if createTimeMeta || modTimeMeta {
			info, err = f.createOrUpdate(ctx, remote, createTime, modTime, info.Size, info.MD5)
		}
	}

	if err != nil {
		return nil, fmt.Errorf("couldn't move file: %w", err)
	}
@ -1826,20 +1786,6 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
	return o.setMetaData(info)
}

// parseFsMetadataTime parses a time string from fs.Metadata with key
func (o *Object) parseFsMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
	value, ok := m[key]
	if ok {
		var err error
		t, err = time.Parse(time.RFC3339Nano, value) // metadata stores RFC3339Nano timestamps
		if err != nil {
			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
			ok = false
		}
	}
	return t, ok
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
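A hedged usage note for parseFsMetadataTime above: rclone metadata stores times as RFC3339Nano strings, and the helper reports ok=false when the key is absent or the value fails to parse, so callers can fall back cleanly:

	if t, ok := o.parseFsMetadataTime(meta, "btime"); ok {
		createTime = t // use the metadata btime when present and valid
	} // otherwise keep the source object's own createTime, as in Copy above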
@ -2011,11 +1957,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	var createdTime string
	var modTime string
	if meta != nil {
		if t, ok := o.parseFsMetadataTime(meta, "btime"); ok {
			createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
		if v, ok := meta["btime"]; ok {
			t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
			if err != nil {
				fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
			} else {
				createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
			}
		}
		if t, ok := o.parseFsMetadataTime(meta, "mtime"); ok {
			modTime = api.Rfc3339Time(t).String()
		if v, ok := meta["mtime"]; ok {
			t, err := time.Parse(time.RFC3339Nano, v)
			if err != nil {
				fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
			} else {
				modTime = api.Rfc3339Time(t).String()
			}
		}
	}
	if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
		//"utime" - read-only
		//"content-type" - read-only
	}
	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
	}()
@ -1,4 +1,5 @@
//go:build darwin || dragonfly || freebsd || linux
// +build darwin dragonfly freebsd linux

package local
@ -23,9 +24,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	}
	bs := int64(s.Bsize) // nolint: unconvert
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         //nolint: unconvert // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), //nolint: unconvert // bytes in use
		Free:  fs.NewUsageValue(bs * int64(s.Bavail)),         //nolint: unconvert // bytes which can be uploaded before reaching the quota
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
		Free:  fs.NewUsageValue(bs * int64(s.Bavail)),         // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}
@ -1,4 +1,5 @@
//go:build windows
// +build windows

package local
@ -1,93 +0,0 @@
//go:build darwin && cgo

// Package local provides a filesystem interface
package local

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"

	"github.com/go-darwin/apfs"
	"github.com/rclone/rclone/fs"
)

// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	if runtime.GOOS != "darwin" || f.opt.NoClone {
		return nil, fs.ErrorCantCopy
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't clone - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
		return nil, fs.ErrorCantCopy
	}

	// Fetch metadata if --metadata is in use
	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	if err != nil {
		return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
	}

	// Create destination
	dstObj := f.newObject(remote)
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}

	srcPath := srcObj.path
	if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
		srcPath, err = filepath.EvalSymlinks(srcPath)
		if err != nil {
			return nil, err
		}
	}

	err = Clone(srcPath, f.localPath(remote))
	if err != nil {
		return nil, err
	}

	// Set metadata if --metadata is in use
	if meta != nil {
		err = dstObj.writeMetadata(meta)
		if err != nil {
			return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
		}
	}

	return f.NewObject(ctx, remote)
}

// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
	state := apfs.CopyFileStateAlloc()
	defer func() {
		if err := apfs.CopyFileStateFree(state); err != nil {
			fs.Errorf(dst, "free state error: %v", err)
		}
	}()
	cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
	fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Copier = &Fs{}
)
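Usage of the Clone helper above is straightforward; when src and dst live on the same APFS volume the clone shares blocks with the source rather than copying data. A hedged sketch (the paths are illustrative only):

	// Both paths on one APFS volume; otherwise behavior follows copyfile(3) semantics.
	if err := Clone("/Volumes/Data/src.bin", "/Volumes/Data/dst.bin"); err != nil {
		// a caller would fall back to a regular streamed copy here
	}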
@ -1,4 +1,5 @@
//go:build !linux
// +build !linux

package local
@ -1,4 +1,5 @@
//go:build linux
// +build linux

package local
@ -1,16 +0,0 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// Can't do this safely on this OS - chmoding a symlink always
	// changes the destination.
	return nil
}
@ -1,41 +0,0 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
	// and returns ENOTSUP if you try, so we don't support this on linux
	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lChmod", Path: name, Err: e}
	}
	return nil
}
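A worked example of syscallMode above, which maps Go's portable mode bits onto the syscall layer: the permission bits pass through unchanged and the setuid/setgid/sticky flags are translated individually.

	m := syscallMode(os.FileMode(0o755) | os.ModeSetuid)
	// m == syscall.S_ISUID | 0o755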
@ -1,4 +1,5 @@
//go:build plan9 || js
//go:build windows || plan9 || js
// +build windows plan9 js

package local
@ -1,4 +1,5 @@
//go:build !windows && !plan9 && !js
// +build !windows,!plan9,!js

package local
@ -1,19 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const haveLChtimes = true
|
||||
|
||||
// lChtimes changes the access and modification times of the named
|
||||
// link, similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a
|
||||
// less precise time unit.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func lChtimes(name string, atime time.Time, mtime time.Time) error {
|
||||
return setTimes(name, atime, mtime, time.Time{}, true)
|
||||
}
|
|
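The removed Windows lChtimes delegates to rclone's internal setTimes. For comparison, on Unix-like systems the analogous effect is available through unix.Lutimes, which updates a symlink's own timestamps rather than its target's; a rough sketch under that assumption (the path is illustrative, and this is not rclone's implementation):

package main

import (
    "log"
    "time"

    "golang.org/x/sys/unix"
)

func main() {
    now := time.Now()
    tv := []unix.Timeval{
        unix.NsecToTimeval(now.UnixNano()), // access time
        unix.NsecToTimeval(now.UnixNano()), // modification time
    }
    // Lutimes updates the timestamps of the link itself, like lChtimes above.
    if err := unix.Lutimes("/tmp/some-symlink", tv); err != nil {
        log.Fatalf("lutimes: %v", err)
    }
}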
@@ -32,11 +32,9 @@ import (
)

// Constants
const (
    devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
    linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
    useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
@@ -80,46 +78,41 @@ supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
    },
    Options: []fs.Option{
        {
            Name: "nounc",
            Help: "Disable UNC (long path names) conversion on Windows.",
            Default: false,
            Advanced: runtime.GOOS != "windows",
            Examples: []fs.OptionExample{{
                Value: "true",
                Help: "Disables long file names.",
            }},
        },
        {
            Name: "copy_links",
            Help: "Follow symlinks and copy the pointed to item.",
            Default: false,
            NoPrefix: true,
            ShortOpt: "L",
            Advanced: true,
        },
        {
            Name: "links",
            Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
            Default: false,
            NoPrefix: true,
            ShortOpt: "l",
            Advanced: true,
        },
        {
            Name: "skip_links",
            Help: `Don't warn about skipped symlinks.
    Options: []fs.Option{{
        Name: "nounc",
        Help: "Disable UNC (long path names) conversion on Windows.",
        Default: false,
        Advanced: runtime.GOOS != "windows",
        Examples: []fs.OptionExample{{
            Value: "true",
            Help: "Disables long file names.",
        }},
    }, {
        Name: "copy_links",
        Help: "Follow symlinks and copy the pointed to item.",
        Default: false,
        NoPrefix: true,
        ShortOpt: "L",
        Advanced: true,
    }, {
        Name: "links",
        Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
        Default: false,
        NoPrefix: true,
        ShortOpt: "l",
        Advanced: true,
    }, {
        Name: "skip_links",
        Help: `Don't warn about skipped symlinks.

This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
        Default: false,
        NoPrefix: true,
        Advanced: true,
        },
        {
            Name: "zero_size_links",
            Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
        Default: false,
        NoPrefix: true,
        Advanced: true,
    }, {
        Name: "zero_size_links",
        Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).

Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:

@@ -129,12 +122,11 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu

So rclone now always reads the link.
`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "unicode_normalization",
            Help: `Apply unicode NFC normalization to paths and filenames.
        Default: false,
        Advanced: true,
    }, {
        Name: "unicode_normalization",
        Help: `Apply unicode NFC normalization to paths and filenames.

This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.

@@ -148,12 +140,11 @@ some OSes.

Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "no_check_updated",
            Help: `Don't check to see if the files change during upload.
        Default: false,
        Advanced: true,
    }, {
        Name: "no_check_updated",
        Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -

@@ -184,96 +175,68 @@ directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "one_file_system",
            Help: "Don't cross filesystem boundaries (unix/macOS only).",
            Default: false,
            NoPrefix: true,
            ShortOpt: "x",
            Advanced: true,
        },
        {
            Name: "case_sensitive",
            Help: `Force the filesystem to report itself as case sensitive.
        Default: false,
        Advanced: true,
    }, {
        Name: "one_file_system",
        Help: "Don't cross filesystem boundaries (unix/macOS only).",
        Default: false,
        NoPrefix: true,
        ShortOpt: "x",
        Advanced: true,
    }, {
        Name: "case_sensitive",
        Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "case_insensitive",
            Help: `Force the filesystem to report itself as case insensitive.
        Default: false,
        Advanced: true,
    }, {
        Name: "case_insensitive",
        Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "no_clone",
            Help: `Disable reflink cloning for server-side copies.

Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.

Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.

Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.

Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
            Default: false,
            Advanced: true,
        },
        {
            Name: "no_preallocate",
            Help: `Disable preallocation of disk space for transferred files.
        Default: false,
        Advanced: true,
    }, {
        Name: "no_preallocate",
        Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "no_sparse",
            Help: `Disable sparse files for multi-thread downloads.
        Default: false,
        Advanced: true,
    }, {
        Name: "no_sparse",
        Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "no_set_modtime",
            Help: `Disable setting modtime.
        Default: false,
        Advanced: true,
    }, {
        Name: "no_set_modtime",
        Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
        Default: false,
        Advanced: true,
        },
        {
            Name: "time_type",
            Help: `Set what kind of time is returned.
        Default: false,
        Advanced: true,
    }, {
        Name: "time_type",
        Help: `Set what kind of time is returned.

Normally rclone does all operations on the mtime or Modification time.
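An aside on the no_sparse help above: a sparse file is simply one whose unwritten ranges stay unallocated on disk. This plain-Go sketch for Unix-like filesystems shows the idea; it is not rclone's implementation, and on Windows the file additionally has to be marked sparse before holes stay unallocated:

package main

import (
    "fmt"
    "io"
    "log"
    "os"
)

func main() {
    f, err := os.Create("/tmp/sparse.bin") // illustrative path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Seek 1 GiB past the start and write a single byte. On filesystems
    // with sparse file support the hole occupies no disk space, which is
    // why multi-thread downloads can write chunks at arbitrary offsets
    // without waiting for the OS to zero-fill the gap.
    if _, err := f.Seek(1<<30, io.SeekStart); err != nil {
        log.Fatal(err)
    }
    if _, err := f.Write([]byte{1}); err != nil {
        log.Fatal(err)
    }

    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("apparent size: %d bytes\n", fi.Size()) // 1 GiB + 1
}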
@@ -292,29 +255,27 @@ will silently replace it with the modification time which all OSes support.
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
            Default: mTime,
            Advanced: true,
            Examples: []fs.OptionExample{{
                Value: mTime.String(),
                Help: "The last modification time.",
            }, {
                Value: aTime.String(),
                Help: "The last access time.",
            }, {
                Value: bTime.String(),
                Help: "The creation time.",
            }, {
                Value: cTime.String(),
                Help: "The last status change time.",
            }},
        },
        {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
            Advanced: true,
            Default: encoder.OS,
        },
    },
        Default: mTime,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: mTime.String(),
            Help: "The last modification time.",
        }, {
            Value: aTime.String(),
            Help: "The last access time.",
        }, {
            Value: bTime.String(),
            Help: "The creation time.",
        }, {
            Value: cTime.String(),
            Help: "The last status change time.",
        }},
    }, {
        Name: config.ConfigEncoding,
        Help: config.ConfigEncodingHelp,
        Advanced: true,
        Default: encoder.OS,
    }},
    }
    fs.Register(fsi)
}
@@ -335,7 +296,6 @@ type Options struct {
    NoSetModTime bool `config:"no_set_modtime"`
    TimeType timeType `config:"time_type"`
    Enc encoder.MultiEncoder `config:"encoding"`
    NoClone bool `config:"no_clone"`
}

// Fs represents a local filesystem rooted at root
@@ -424,10 +384,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if opt.FollowSymlinks {
        f.lstat = os.Stat
    }
    if opt.NoClone {
        // Disable server-side copy when --local-no-clone is set
        f.features.Copy = nil
    }

    // Check to see if this points to a file
    fi, err := f.lstat(f.root)
@@ -1599,60 +1555,33 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
    return err
}

// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
    err := o.writeMetadata(metadata)
    if err != nil {
        return fmt.Errorf("SetMetadata failed on Object: %w", err)
    }
    // Re-read info now we have finished setting stuff
    return o.lstat()
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
    var vol string
    if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
        if !filepath.IsAbs(s) {
            s2, err := filepath.Abs(s)
            if err == nil {
                s = s2
            }
        } else {
            s = filepath.Clean(s)
        }
    }
    if runtime.GOOS == "windows" {
        vol = filepath.VolumeName(s)
        s = filepath.ToSlash(s)
        vol := filepath.VolumeName(s)
        if vol == `\\?` && len(s) >= 6 {
            // `\\?\C:`
            vol = s[:6]
        }
        s = s[len(vol):]
    }
    // Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
    // Take care of the case Standard: ././‛. (the first dot means current directory)
    if enc != encoder.Standard {
        s = filepath.ToSlash(s)
        parts := strings.Split(s, "/")
        encoded := make([]string, len(parts))
        changed := false
        for i, p := range parts {
            if (p == ".") || (p == "..") {
                encoded[i] = p
                continue
            }
            part := enc.FromStandardName(p)
            changed = changed || part != p
            encoded[i] = part
        }
        if changed {
            s = strings.Join(encoded, "/")
        }
        s = vol + enc.FromStandardPath(s[len(vol):])
        s = filepath.FromSlash(s)
        if !noUNC {
            // Convert to UNC
            s = file.UNCPath(s)
        }
        return s
    }
    if runtime.GOOS == "windows" {
        s = vol + s
    }
    s2, err := filepath.Abs(s)
    if err == nil {
        s = s2
    }
    if !noUNC {
        // Convert to UNC. It does nothing on non windows platforms.
        s = file.UNCPath(s)
    }
    s = enc.FromStandardPath(s)
    return s
}
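A detail the simplified cleanRootPath relies on: filepath.Abs resolves a relative path against the working directory and cleans its result, so the old IsAbs/Clean branching collapses into one call. A quick illustration:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Abs resolves a relative path against the working directory and
    // calls Clean on the result, so the ".." hop disappears.
    p, err := filepath.Abs("a/b/../c")
    fmt.Println(p, err) // e.g. /home/user/a/c <nil>

    // Already-absolute paths come back cleaned as well.
    fmt.Println(filepath.Clean("/tmp//x/./y")) // /tmp/x/y
}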
@@ -1700,7 +1629,6 @@ var (
    _ fs.MkdirMetadataer = &Fs{}
    _ fs.Object = &Object{}
    _ fs.Metadataer = &Object{}
    _ fs.SetMetadataer = &Object{}
    _ fs.Directory = &Directory{}
    _ fs.SetModTimer = &Directory{}
    _ fs.SetMetadataer = &Directory{}
@@ -73,6 +73,7 @@ func TestUpdatingCheck(t *testing.T) {
    r.WriteFile(filePath, "content updated", time.Now())
    _, err = in.Read(buf)
    require.NoError(t, err)

}

// Test corrupted on transfer
@@ -223,7 +224,7 @@ func TestHashOnUpdate(t *testing.T) {
    assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

    // Reupload it with different contents but same size and timestamp
    b := bytes.NewBufferString("CONTENT")
    var b = bytes.NewBufferString("CONTENT")
    src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
    err = o.Update(ctx, b, src)
    require.NoError(t, err)
@@ -268,66 +269,22 @@ func TestMetadata(t *testing.T) {
    r := fstest.NewRun(t)
    const filePath = "metafile.txt"
    when := time.Now()
    const dayLength = len("2001-01-01")
    whenRFC := when.Format(time.RFC3339Nano)
    r.WriteFile(filePath, "metadata file contents", when)
    f := r.Flocal.(*Fs)

    // Set fs into "-l" / "--links" mode
    f.opt.TranslateSymlinks = true

    // Write a symlink to the file
    symlinkPath := "metafile-link.txt"
    osSymlinkPath := filepath.Join(f.root, symlinkPath)
    symlinkPath += linkSuffix
    require.NoError(t, os.Symlink(filePath, osSymlinkPath))
    symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
    require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))

    // Get the object
    obj, err := f.NewObject(ctx, filePath)
    require.NoError(t, err)
    o := obj.(*Object)

    // Get the symlink object
    symlinkObj, err := f.NewObject(ctx, symlinkPath)
    require.NoError(t, err)
    symlinkO := symlinkObj.(*Object)

    // Record metadata for o
    oMeta, err := o.Metadata(ctx)
    require.NoError(t, err)

    // Test symlink first to check it doesn't mess up file
    t.Run("Symlink", func(t *testing.T) {
        testMetadata(t, r, symlinkO, symlinkModTime)
    })

    // Read it again
    oMetaNew, err := o.Metadata(ctx)
    require.NoError(t, err)

    // Check that operating on the symlink didn't change the file it was pointing to
    // See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
    assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")

    // Now run the same tests on the file
    t.Run("File", func(t *testing.T) {
        testMetadata(t, r, o, when)
    })
}

func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
    ctx := context.Background()
    whenRFC := when.Format(time.RFC3339Nano)
    const dayLength = len("2001-01-01")

    f := r.Flocal.(*Fs)
    features := f.Features()

    var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
    var hasXID, hasAtime, hasBtime bool
    switch runtime.GOOS {
    case "darwin", "freebsd", "netbsd", "linux":
        hasXID, hasAtime, hasBtime = true, true, true
        canSetXattrOnLinks = runtime.GOOS != "linux"
    case "openbsd", "solaris":
        hasXID, hasAtime = true, true
    case "windows":
@@ -350,10 +307,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
    require.NoError(t, err)
    assert.Nil(t, m)

    if !canSetXattrOnLinks && o.translatedLink {
        t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
    }

    inM := fs.Metadata{
        "potato": "chips",
        "cabbage": "soup",
@@ -368,21 +321,18 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
    })

    checkTime := func(m fs.Metadata, key string, when time.Time) {
        t.Helper()
        mt, ok := o.parseMetadataTime(m, key)
        assert.True(t, ok)
        dt := mt.Sub(when)
        precision := time.Second
        assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
        assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
    }

    checkInt := func(m fs.Metadata, key string, base int) int {
        t.Helper()
        value, ok := o.parseMetadataInt(m, key, base)
        assert.True(t, ok)
        return value
    }

    t.Run("Read", func(t *testing.T) {
        m, err := o.Metadata(ctx)
        require.NoError(t, err)
@@ -392,12 +342,13 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
        checkInt(m, "mode", 8)
        checkTime(m, "mtime", when)

        assert.Equal(t, len(whenRFC), len(m["mtime"]))
        assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

        if hasAtime && !o.translatedLink { // symlinks generally don't record atime
        if hasAtime {
            checkTime(m, "atime", when)
        }
        if hasBtime && !o.translatedLink { // symlinks generally don't record btime
        if hasBtime {
            checkTime(m, "btime", when)
        }
        if hasXID {
@@ -421,10 +372,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
        "mode": "0767",
        "potato": "wedges",
    }
    if !canSetXattrOnLinks && o.translatedLink {
        // Don't change xattr if not supported on symlinks
        delete(newM, "potato")
    }
    err := o.writeMetadata(newM)
    require.NoError(t, err)
@@ -434,11 +381,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {

        mode := checkInt(m, "mode", 8)
        if runtime.GOOS != "windows" {
            expectedMode := 0767
            if o.translatedLink && runtime.GOOS == "linux" {
                expectedMode = 0777 // perms of symlinks always read as 0777 on linux
            }
            assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
            assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
        }

        checkTime(m, "mtime", newMtime)
@@ -448,10 +391,11 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
        if haveSetBTime {
            checkTime(m, "btime", newBtime)
        }
        if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
        if xattrSupported {
            assert.Equal(t, "wedges", m["potato"])
        }
    })

}

func TestFilter(t *testing.T) {
@@ -628,35 +572,4 @@ func TestCopySymlink(t *testing.T) {
    linkContents, err := os.Readlink(dstPath)
    require.NoError(t, err)
    assert.Equal(t, "file.txt", linkContents)

    // Set fs into "-L/--copy-links" mode
    f.opt.FollowSymlinks = true
    f.opt.TranslateSymlinks = false
    f.lstat = os.Stat

    // Create dst
    require.NoError(t, f.Mkdir(ctx, "dst2"))

    // Do copy from src into dst
    src, err = f.NewObject(ctx, "src/link.txt")
    require.NoError(t, err)
    require.NotNil(t, src)
    dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
    require.NoError(t, err)
    require.NotNil(t, dst)

    // Test that we made a NON-symlink and it has the right contents
    dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
    fi, err := os.Lstat(dstPath)
    require.NoError(t, err)
    assert.True(t, fi.Mode()&os.ModeSymlink == 0)
    want := fstest.NewItem("dst2/link.txt", "hello world", when)
    fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")

    // Test that copying a normal file also works
    dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
    require.NoError(t, err)
    require.NotNil(t, dst)
    want = fstest.NewItem("dst2/file.txt", "hello world", when)
    fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}
@@ -2,7 +2,6 @@ package local

import (
    "fmt"
    "math"
    "os"
    "runtime"
    "strconv"


@@ -73,12 +72,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
    value, ok := m[key]
    if ok {
        var err error
        parsed, err := strconv.ParseInt(value, base, 0)
        result64, err := strconv.ParseInt(value, base, 64)
        if err != nil {
            fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
            ok = false
        }
        result = int(parsed)
        result = int(result64)
    }
    return result, ok
}
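On the parseMetadataInt change above: strconv.ParseInt always returns an int64, and its bitSize argument only sets the range check (0 means "must fit in int", 64 means "must fit in int64"). A short illustration:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // Parse "0767" as base 8, range-checked against int64.
    v, err := strconv.ParseInt("0767", 8, 64)
    fmt.Println(v, err) // 503 <nil>

    // A value that cannot fit the requested bit size fails the range
    // check instead of silently wrapping.
    _, err = strconv.ParseInt("999999999999999999999", 10, 64)
    fmt.Println(err) // ... value out of range
}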
@@ -105,11 +104,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
    }
    if haveSetBTime {
        if btimeOK {
            if o.translatedLink {
                err = lsetBTime(o.path, btime)
            } else {
                err = setBTime(o.path, btime)
            }
            err = setBTime(o.path, btime)
            if err != nil {
                outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
            }
@@ -125,11 +120,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
    if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
        fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
    } else {
        if o.translatedLink {
            err = os.Lchown(o.path, uid, gid)
        } else {
            err = os.Chown(o.path, uid, gid)
        }
        err = os.Chown(o.path, uid, gid)
        if err != nil {
            outErr = fmt.Errorf("failed to change ownership: %w", err)
        }
@@ -137,23 +128,9 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
    }
    mode, hasMode := o.parseMetadataInt(m, "mode", 8)
    if hasMode {
        if mode >= 0 {
            umode := uint(mode)
            if umode <= math.MaxUint32 {
                if o.translatedLink {
                    if haveLChmod {
                        err = lChmod(o.path, os.FileMode(umode))
                    } else {
                        fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
                        err = nil
                    }
                } else {
                    err = os.Chmod(o.path, os.FileMode(umode))
                }
                if err != nil {
                    outErr = fmt.Errorf("failed to change permissions: %w", err)
                }
            }
        }
        err = os.Chmod(o.path, os.FileMode(mode))
        if err != nil {
            outErr = fmt.Errorf("failed to change permissions: %w", err)
        }
    }
    // FIXME not parsing rdev yet
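The mode handling above parses the metadata value in base 8 before handing it to chmod, since modes like "0767" are octal. A minimal sketch of that round trip (the path is illustrative):

package main

import (
    "log"
    "os"
    "strconv"
)

func main() {
    // "0767" from the metadata map is octal: 0o767 == 503 decimal.
    mode, err := strconv.ParseInt("0767", 8, 64)
    if err != nil {
        log.Fatal(err)
    }
    // The low 9 bits of os.FileMode are the usual rwxrwxrwx permissions.
    if err := os.Chmod("/tmp/some-file", os.FileMode(mode)); err != nil {
        log.Fatal(err)
    }
}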
@@ -1,4 +1,5 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd

package local


@@ -1,4 +1,5 @@
//go:build linux
// +build linux

package local


@@ -1,4 +1,5 @@
//go:build dragonfly || plan9 || js
//go:build plan9 || js
// +build plan9 js

package local


@@ -1,4 +1,5 @@
//go:build openbsd || solaris
// +build openbsd solaris

package local
Some files were not shown because too many files have changed in this diff.