Compare commits
5 commits
tcl/master ... fix-7337-s

| Author | SHA1 | Date |
|---|---|---|
| | 3fa5a424a9 | |
| | 9fb0afad88 | |
| | 2f9c2cf75e | |
| | 1ac18e5765 | |
| | 3e8cee148a | |
465 changed files with 55429 additions and 97672 deletions
@@ -1,45 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: community, triage, bug
-assignees: ''
-
----
-
-<!--- Provide a general summary of the issue in the Title above -->
-
-## Expected Behavior
-<!--- If you're describing a bug, tell us what should happen -->
-<!--- If you're suggesting a change/improvement, tell us how it should work -->
-
-## Current Behavior
-<!--- If describing a bug, tell us what happens instead of the expected behavior -->
-<!--- If suggesting a change/improvement, explain the difference from current behavior -->
-
-## Possible Solution
-<!--- Not obligatory -->
-<!--- If no reason/fix/additions for the bug can be suggested, -->
-<!--- uncomment the following phrase: -->
-
-<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
-
-## Steps to Reproduce (for bugs)
-<!--- Provide a link to a live example, or an unambiguous set of steps to -->
-<!--- reproduce this bug. -->
-
-1.
-
-## Context
-<!--- How has this issue affected you? What are you trying to accomplish? -->
-<!--- Providing context helps us come up with a solution that is most useful in the real world -->
-
-## Regression
-<!-- Is this issue a regression? (Yes / No) -->
-<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
-
-## Your Environment
-<!--- Include as many relevant details about the environment you experienced the bug in -->
-* Version used:
-* Server setup and configuration:
-* Operating System and version (`uname -a`):
@@ -1 +0,0 @@
-blank_issues_enabled: false
@@ -1,24 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - tcl/master
-
-jobs:
-  builds:
-    name: Builds
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        go_versions: [ '1.22', '1.23' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Build binary
-        run: make
@@ -1,20 +0,0 @@
-on: [pull_request]
-
-jobs:
-  dco:
-    name: DCO
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Setup Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-
-      - name: Run commit format checker
-        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
-        with:
-          from: 'origin/${{ github.event.pull_request.base.ref }}'
@@ -1,67 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - tcl/master
-
-jobs:
-  lint:
-    name: Lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-          cache: true
-
-      - name: Install linters
-        run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
-      - name: Run linters
-        run: make check
-  test:
-    name: Test
-    runs-on: oci-runner
-    strategy:
-      matrix:
-        go_versions: [ '1.23' ]
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '${{ matrix.go_versions }}'
-
-      - name: Tests for the FrostFS backend
-        env:
-          RESTIC_TEST_FUSE: false
-          AIO_IMAGE: truecloudlab/frostfs-aio
-          AIO_VERSION: 1.7.0-nightly.4
-          RCLONE_CONFIG: /config/rclone.conf
-
-        # run only tests related to FrostFS backend
-        run: |-
-          podman-service.sh
-          podman info
-
-          mkdir /config
-          printf "[TestFrostFS]\ntype = frostfs\nendpoint = localhost:8080\nwallet = /config/wallet.json\nplacement_policy = REP 1\nrequest_timeout = 20s\nconnection_timeout = 21s" > /config/rclone.conf
-
-          echo "Run frostfs aio container"
-          docker run -d --net=host --name aio $AIO_IMAGE:$AIO_VERSION --restart always -p 8080:8080
-
-          echo "Wait for frostfs to start"
-          until docker exec aio curl --fail http://localhost:8083 > /dev/null 2>&1; do sleep 0.2; done;
-
-          echo "Issue creds"
-          docker exec aio /usr/bin/issue-creds.sh native
-          echo "Copy wallet"
-          docker cp aio:/config/user-wallet.json /config/wallet.json
-
-          echo "Start tests"
-          go test -v github.com/rclone/rclone/backend/frostfs
4 .github/workflows/build.yml vendored
@@ -124,7 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -237,7 +237,7 @@ jobs:
         id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '>=1.22.0-rc.1'
           check-latest: true
           cache: false

@@ -32,27 +32,15 @@ jobs:
       - name: Get actual major version
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKER_HUB_USER }}
-          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
       - name: Build and publish image
-        uses: docker/build-push-action@v6
+        uses: ilteoood/docker_buildx@1.1.0
         with:
-          file: Dockerfile
-          context: .
-          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          push: true
-          tags: |
-            rclone/rclone:latest
-            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
-            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
-            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'
@@ -13,7 +13,6 @@ linters:
     - stylecheck
     - unused
     - misspell
-    - gocritic
     #- prealloc
     #- maligned
   disable-all: true
@@ -99,46 +98,3 @@ linters-settings:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-  gocritic:
-    # Enable all default checks with some exceptions and some additions (commented).
-    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-    disable-all: true
-    enabled-checks:
-      #- appendAssign # Enabled by default
-      - argOrder
-      - assignOp
-      - badCall
-      - badCond
-      #- captLocal # Enabled by default
-      - caseOrder
-      - codegenComment
-      #- commentFormatting # Enabled by default
-      - defaultCaseOrder
-      - deprecatedComment
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      #- exitAfterDefer # Enabled by default
-      - flagDeref
-      - flagName
-      #- ifElseChain # Enabled by default
-      - mapKey
-      - newDeref
-      - offBy1
-      - regexpMust
-      - ruleguard # Not enabled by default
-      #- singleCaseSwitch # Enabled by default
-      - sloppyLen
-      - sloppyTypeAssert
-      - switchTrue
-      - typeSwitchVar
-      - underef
-      - unlambda
-      - unslice
-      - valSwap
-      - wrapperFunc
-    settings:
-      ruleguard:
-        rules: "${configDir}/bin/rules.go"
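This hunk removes the gocritic linter configuration entirely (it also disappears from the `linters` list in the previous hunk). For readers unfamiliar with the checks being switched off, here is the kind of code two of the listed checks flag; a hypothetical fragment for illustration, not taken from rclone:

```go
package main

import "fmt"

func main() {
	s := []int{1, 2, 3}

	// sloppyLen: `len(s) >= 1` is flagged; the idiomatic form is `len(s) > 0`.
	if len(s) > 0 {
		fmt.Println("non-empty")
	}

	// assignOp: `x = x - 1` is flagged; the idiomatic form is `x -= 1` (or x--).
	x := 10
	x--
	fmt.Println(x)
}
```

Several of the later Go hunks in this compare (the `size -= 32` vs `size = size - 32` pairs, the `else if` flattenings, the `//nolint:gocritic` comments) are exactly the churn that enabling or disabling these checks produces.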
@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
 project root:

     go install github.com/rclone/rclone/fstest/test_all
-    test_all -backends drive
+    test_all -backend drive

 ### Full integration testing

@@ -508,7 +508,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint).
+  - Exclude your provider from genric config questions (eg `region` and `endpoint).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
@@ -21,8 +21,6 @@ Current active maintainers of rclone are:
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
 | Hideo Aoyama | @boukendesho | snap packaging |
 | nielash | @nielash | bisync |
-| Dan McArdle | @dmcardle | gitannex |
-| Sam Harrison | @childish-sambino | filescom |

 **This is a work in progress Draft**

40450 MANUAL.html generated
File diff suppressed because it is too large

4903 MANUAL.txt generated
File diff suppressed because it is too large
@@ -55,14 +55,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
-* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
-* Files.com [:page_facing_up:](https://rclone.org/filescom/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
-* GoFile [:page_facing_up:](https://rclone.org/gofile/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
 * Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
@@ -96,7 +93,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
 * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
-* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@@ -105,7 +101,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
-* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
@@ -168,8 +168,6 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a

 To make a full build then set the tags correctly and add `--push`

 Note that you can't only build one architecture - you need to build them all.

 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
2 VERSION
@@ -1 +1 @@
-v1.68.2
+v1.68.0
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
 	configfile.Install()

 	// Configure the remote
-	config.FileSetValue(remoteName, "type", "alias")
-	config.FileSetValue(remoteName, "remote", root)
+	config.FileSet(remoteName, "type", "alias")
+	config.FileSet(remoteName, "remote", root)
 }

 func TestNewFS(t *testing.T) {
@@ -17,10 +17,7 @@ import (
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
-	_ "github.com/rclone/rclone/backend/filefabric"
-	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/frostfs"
 	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"
@@ -42,7 +39,6 @@ import (
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/pikpak"
-	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
@@ -2094,6 +2094,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		return 0, nil
 	}
 	md5sum := m.Sum(nil)
+	transactionalMD5 := md5sum[:]

 	// increment the blockID and save the blocks for finalize
 	var binaryBlockID [8]byte // block counter as LSB first 8 bytes
@@ -2116,7 +2117,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	}
 	options := blockblob.StageBlockOptions{
 		// Specify the transactional md5 for the body, to be validated by the service.
-		TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
+		TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
 	}
 	_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
 	if err != nil {
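Both sides of this hunk compute the same checksum: `m.Sum(nil)` on a `crypto/md5` hash already yields a `[]byte`, so the extra `md5sum[:]` re-slice is a no-op rename rather than a behavior change. A minimal sketch of the transactional-MD5 idea, where `stageBlock` is a hypothetical stand-in for the upload call, not the Azure SDK API:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
)

// stageBlock stands in for an upload call that lets the server verify
// the body against a caller-supplied MD5 (hypothetical name).
func stageBlock(body []byte, transactionalMD5 []byte) error {
	sum := md5.Sum(body) // what the server would recompute
	if !bytes.Equal(sum[:], transactionalMD5) {
		return fmt.Errorf("md5 mismatch: body corrupted in transit")
	}
	return nil
}

func main() {
	chunk := []byte("example chunk payload")

	// Streaming style, as in the diff: hash while reading, then Sum(nil).
	m := md5.New()
	m.Write(chunk)
	md5sum := m.Sum(nil) // already a []byte; md5sum[:] is redundant

	fmt.Println(stageBlock(chunk, md5sum)) // <nil>
}
```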
@@ -1035,10 +1035,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
 			return fmt.Errorf("update: unable to create file: %w", createErr)
 		}
-	} else if size != o.Size() {
+	} else {
 		// Resize the file if needed
-		if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
-			return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
+		if size != o.Size() {
+			if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
+				return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
+			}
 		}
 	}

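The two sides are behaviorally identical; one side just flattens `else { if ... }` into `else if`, which is the cleanup that gocritic's `elseif` check (listed in the linter config above) suggests. A generic sketch with hypothetical condition names:

```go
package main

import "fmt"

// Nested form, as on one side of the hunk.
func nested(fileExists, sizeChanged bool) string {
	if !fileExists {
		return "create"
	} else {
		if sizeChanged {
			return "resize"
		}
	}
	return "keep"
}

// Flattened form, as on the other side: same behavior, one less nesting level.
func flattened(fileExists, sizeChanged bool) string {
	if !fileExists {
		return "create"
	} else if sizeChanged {
		return "resize"
	}
	return "keep"
}

func main() {
	fmt.Println(nested(true, true), flattened(true, true)) // resize resize
}
```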
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }

 func TestTimestampEqual(t *testing.T) {
-	assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.False(t, emptyT.Equal(emptyT))
 	assert.False(t, t0.Equal(emptyT))
 	assert.False(t, emptyT.Equal(t0))
 	assert.False(t, t0.Equal(t1))
 	assert.False(t, t1.Equal(t0))
-	assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
-	assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.True(t, t0.Equal(t0))
+	assert.True(t, t1.Equal(t1))
 }

22 backend/cache/cache.go vendored
@@ -409,16 +409,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		if err != nil {
 			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 		}
-	} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
-		decPass, err := obscure.Reveal(opt.PlexPassword)
-		if err != nil {
-			decPass = opt.PlexPassword
-		}
-		f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-			m.Set("plex_token", token)
-		})
-		if err != nil {
-			return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+	} else {
+		if opt.PlexPassword != "" && opt.PlexUsername != "" {
+			decPass, err := obscure.Reveal(opt.PlexPassword)
+			if err != nil {
+				decPass = opt.PlexPassword
+			}
+			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+				m.Set("plex_token", token)
+			})
+			if err != nil {
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+			}
 		}
 	}

73 backend/cache/cache_internal_test.go vendored
@@ -10,6 +10,7 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
+	"log"
 	"math/rand"
 	"os"
 	"path"
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int

-	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	// update in the wrapped fs
 	originalSize, err := runInstance.size(t, rootFs, "data.bin")
 	require.NoError(t, err)
-	fs.Logf(nil, "original size: %v", originalSize)
+	log.Printf("original size: %v", originalSize)

 	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
@@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	if runInstance.rootIsCrypt {
 		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 		require.NoError(t, err)
-		expectedSize++ // FIXME newline gets in, likely test data issue
+		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
 	} else {
 		data2 = []byte("test content")
 	}
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
-	fs.Logf(nil, "updated size: %v", len(data2))
+	log.Printf("updated size: %v", len(data2))

 	// get a new instance from the cache
 	if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		li, err := runInstance.list(t, rootFs, "test")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 2 {
-			fs.Logf(nil, "not expected listing /test: %v", li)
+			log.Printf("not expected listing /test: %v", li)
 			return fmt.Errorf("not expected listing /test: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 0 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}

 		li, err = runInstance.list(t, rootFs, "test/second")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/second: %v", li)
+			log.Printf("not expected listing /test/second: %v", li)
 			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}

-		fs.Logf(nil, "complete listing: %v", li)
+		log.Printf("complete listing: %v", li)
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
-			fs.Logf(nil, "not found /test")
+			log.Printf("not found /test")
 			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
-			fs.Logf(nil, "not found /test/one")
+			log.Printf("not found /test/one")
 			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
-			fs.Logf(nil, "not found /test/one/test2")
+			log.Printf("not found /test/one/test2")
 			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			fs.Logf(nil, "err: %v", err)
+			log.Printf("err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			fs.Logf(nil, "not expected listing /test/one: %v", li)
+			log.Printf("not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
-				fs.Logf(nil, "not expected name: %v", fi.Name())
+				log.Printf("not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
-				fs.Logf(nil, "not expected remote: %v", di.Remote())
+				log.Printf("not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			fs.Logf(nil, "unexpected listing: %v", li)
+			log.Printf("unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
-		fs.Logf(nil, "complete listing /test/one/test2")
+		log.Printf("complete listing /test/one/test2")
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {

 	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	time.Sleep(time.Second * 30)

 	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 1)

 	di, err = runInstance.list(t, rootFs, "test/dir1")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)

 	di, err = runInstance.list(t, rootFs, "test")
 	require.NoError(t, err)
-	fs.Logf(nil, "len: %v", len(di))
+	log.Printf("len: %v", len(di))
 	require.Len(t, di, 4)
 }

@@ -828,7 +829,7 @@ func newRun() *run {
 	} else {
 		r.tmpUploadDir = uploadDir
 	}
-	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
+	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)

 	return r
 }
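The repeated `fs.Logf(nil, ...)` vs `log.Printf(...)` churn in the hunks above is the same one-line change everywhere: one side routes test logging through rclone's `fs` logging layer, the other uses the standard library. A minimal sketch of the difference, assuming rclone's `fs.Logf` as it is used in the diff (first argument is the object being logged about, or nil):

```go
package main

import (
	"log"

	"github.com/rclone/rclone/fs"
)

func main() {
	size := 42

	// Standard library: a plain timestamped line to stderr, unconditionally.
	log.Printf("original size: %v", size)

	// rclone's logger: same formatting verbs, but the output honours
	// rclone's log levels and redirection, and can be tagged with the
	// object it concerns (nil here for "no particular object").
	fs.Logf(nil, "original size: %v", size)
}
```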
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
-	for _, s := range config.GetRemotes() {
-		if s.Name == remote {
+	for _, s := range config.FileSections() {
+		if s == remote {
 			remoteExists = true
 		}
 	}
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	cacheRemote := remote
 	if !remoteExists {
 		localRemote := remote + "-local"
-		config.FileSetValue(localRemote, "type", "local")
-		config.FileSetValue(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.GetValue(remote, "type")
+		remoteType := config.FileGet(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		m.Set("password", cryptPassword1)
 		m.Set("password2", cryptPassword2)
 	}
-	remoteRemote := config.GetValue(remote, "remote")
+	remoteRemote := config.FileGet(remote, "remote")
 	if remoteRemote == "" {
 		t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 		return nil, nil
 	}
 	remoteRemoteParts := strings.Split(remoteRemote, ":")
 	remoteWrapping := remoteRemoteParts[0]
-	remoteType := config.GetValue(remoteWrapping, "type")
+	remoteType := config.FileGet(remoteWrapping, "type")
 	if remoteType != "cache" {
 		t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 		return nil, nil
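Both config APIs appear in the hunks above, so the mapping is visible directly in the diff: the `File*` helpers walk the config file by plain section name, while `GetRemotes`/`GetValue` return typed remotes. A sketch of the two enumeration styles, using only the calls shown in the diff:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config"
)

// Older style, as on one side of the hunks: sections are plain strings.
func remoteExistsBySection(name string) bool {
	for _, s := range config.FileSections() {
		if s == name {
			return true
		}
	}
	return false
}

// Newer style, as on the other side: remotes carry a Name field.
func remoteExistsByRemote(name string) bool {
	for _, r := range config.GetRemotes() {
		if r.Name == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(remoteExistsBySection("TestCache"), remoteExistsByRemote("TestCache"))
}
```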
@@ -1191,7 +1192,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 	if r.rootIsCrypt {
 		denominator := int64(65536 + 16)
-		size -= 32
+		size = size - 32
 		quotient := size / denominator
 		remainder := size % denominator
 		return (quotient*65536 + remainder - 16)
10 backend/cache/handle.go vendored
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

 	// we align the start offset of the first chunk to a likely chunk in the storage
-	chunkStart -= offset
+	chunkStart = chunkStart - offset
 	r.queueOffset(chunkStart)
 	found := false

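The subtraction here (identical on both sides, just spelled differently) is the standard round-down-to-chunk-boundary trick: an arbitrary read offset is reduced by its remainder modulo the chunk size, so cached chunks always start at multiples of the chunk size. A small sketch:

```go
package main

import "fmt"

// alignToChunk rounds offset down to the nearest multiple of chunkSize,
// mirroring the chunkStart -= offset logic in the hunk above.
func alignToChunk(offset, chunkSize int64) int64 {
	return offset - offset%chunkSize
}

func main() {
	const chunkSize = 5 * 1024 * 1024 // 5 MiB, an illustrative value
	fmt.Println(alignToChunk(0, chunkSize))        // 0
	fmt.Println(alignToChunk(7340032, chunkSize))  // 5242880 (7 MiB -> 5 MiB)
	fmt.Println(alignToChunk(10485760, chunkSize)) // 10485760 (already aligned)
}
```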
@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)

@@ -415,8 +415,10 @@ func (w *worker) run() {
 				continue
 			}
 		}
-	} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-		continue
+	} else {
+		if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+			continue
+		}
 	}

 	chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
@@ -987,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 		}
 	}

-	if o.main == nil && len(o.chunks) == 0 {
+	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 		// Scanning hasn't found data chunks with conforming names.
 		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.
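The two conditions here are equivalent: in Go, `len` of a nil slice is defined to be 0, so the explicit `o.chunks == nil ||` guard is redundant rather than wrong. A one-file demonstration:

```go
package main

import "fmt"

func main() {
	var chunks []string // nil slice: no allocation has happened yet

	// len(nil slice) == 0 is guaranteed by the language spec, so the
	// nil check adds nothing to the condition.
	fmt.Println(chunks == nil)                     // true
	fmt.Println(len(chunks) == 0)                  // true
	fmt.Println(chunks == nil || len(chunks) == 0) // true (redundant form)
}
```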
@@ -38,7 +38,6 @@ import (
 const (
 	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
 	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
-	chunkStreams     = 0       // Streams to use for reading

 	bufferSize     = 8388608
 	heuristicBytes = 1048576
@@ -1363,7 +1362,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		}
 	}
 	// Get a chunkedreader for the wrapped object
-	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
+	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
 	// Get file handle
 	var file io.Reader
 	if offset != 0 {
@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	for _, runeValue := range plaintext {
 		dir += int(runeValue)
 	}
-	dir %= 256
+	dir = dir % 256

 	// We'll use this number to store in the result filename...
 	var result bytes.Buffer
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 		if pos >= 26 {
 			pos -= 6
 		}
-		pos -= thisdir
+		pos = pos - thisdir
 		if pos < 0 {
 			pos += 52
 		}
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
 		// Zero out the bad block and continue
 		for i := range (*fh.buf)[:n] {
-			fh.buf[i] = 0
+			(*fh.buf)[i] = 0
 		}
 	}
 	fh.bufIndex = 0
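The distinction this pair turns on: Go automatically dereferences pointers to arrays but not pointers to slices, so a `*[]byte` (as commonly handed out by buffer pools) must be explicitly dereferenced before indexing. A minimal illustration:

```go
package main

import "fmt"

func main() {
	data := make([]byte, 4)
	buf := &data // *[]byte, e.g. from a sync.Pool of buffers

	// buf[0] = 1        // compile error: cannot index a *[]byte
	(*buf)[0] = 1 // explicit dereference, as in the hunk above

	arr := &[4]byte{} // *[4]byte: pointers to ARRAYS auto-dereference
	arr[0] = 1        // legal shorthand for (*arr)[0]

	fmt.Println(data, *arr) // [1 0 0 0] [1 0 0 0]
}
```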
@@ -2219,7 +2219,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			case in <- job:
 			default:
 				overflow = append(overflow, job)
-				wg.Done()
+				wg.Add(-1)
 			}
 		}

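`wg.Done()` is defined in the standard library as exactly `wg.Add(-1)`, so this pair is purely stylistic: `Done` signals intent (one unit of work finished) while `Add(-1)` reads as counter arithmetic. Sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(2)

	go func() { defer wg.Done() }()  // idiomatic completion signal
	go func() { defer wg.Add(-1) }() // equivalent: Done() is Add(-1)

	wg.Wait()
	fmt.Println("both goroutines accounted for")
}
```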
@@ -3965,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
+	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
 	return "", nil
@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
oldToken = strings.TrimSpace(oldToken)
|
||||
if ok && oldToken != "" && oldToken[0] != '{' {
|
||||
fs.Infof(name, "Converting token to new format")
|
||||
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
||||
|
|
|
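The `%q` form is the safer of the two: it emits a quoted, escaped string, so for typical tokens containing `"` or `\` the result is still valid JSON, whereas `"%s"` splices the raw value between quote characters. Demonstration:

```go
package main

import "fmt"

func main() {
	token := `abc"def\ghi` // hostile token with a quote and a backslash

	unsafe := fmt.Sprintf(`{"access_token":"%s"}`, token)
	safe := fmt.Sprintf(`{"access_token":%q}`, token)

	fmt.Println(unsafe) // {"access_token":"abc"def\ghi"}   <- broken JSON
	fmt.Println(safe)   // {"access_token":"abc\"def\\ghi"} <- valid JSON
}
```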
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374, 412: // Flood detected seems to be #412 now
+	case 374:
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	srcFs := srcObj.fs

 	// Find current directory ID
-	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
+	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		return nil, err
 	}

 	// Create temporary object
-	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}

 	// If it is in the correct directory, just rename it
 	var url string
-	if srcDirectoryID == dstDirectoryID {
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			return src, nil
-		}
-		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
+	if currentDirectoryID == directoryID {
+		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		dstFolderID, err := strconv.Atoi(dstDirectoryID)
+		folderID, err := strconv.Atoi(directoryID)
 		if err != nil {
 			return nil, err
 		}
-		rename := dstLeaf
-		// No rename needed
-		if srcLeaf == dstLeaf {
-			rename = ""
-		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
+		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
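Both versions of this `Move` implement the same dispatch, though one side adds same-name short-circuits: rename when the parent directory is unchanged, move otherwise, and skip the rename part of a move when the leaf name is identical. A backend-agnostic sketch of that decision (hypothetical helper, not the 1Fichier API):

```go
package main

import "fmt"

// moveOrRename sketches the dispatch both versions of Move share.
func moveOrRename(srcDirID, dstDirID, srcLeaf, dstLeaf string) string {
	if srcDirID == dstDirID {
		if srcLeaf == dstLeaf {
			return "nothing to do" // same directory, same name
		}
		return "rename to " + dstLeaf // same directory: cheap rename call
	}
	if srcLeaf == dstLeaf {
		return "move to dir " + dstDirID // different directory, same name
	}
	return "move to dir " + dstDirID + " and rename to " + dstLeaf
}

func main() {
	fmt.Println(moveOrRename("10", "10", "a.txt", "a.txt")) // nothing to do
	fmt.Println(moveOrRename("10", "10", "a.txt", "b.txt")) // rename to b.txt
	fmt.Println(moveOrRename("10", "20", "a.txt", "a.txt")) // move to dir 20
}
```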
@@ -1,901 +0,0 @@
-// Package filescom provides an interface to the Files.com
-// object storage system.
-package filescom
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-	"time"
-
-	files_sdk "github.com/Files-com/files-sdk-go/v3"
-	"github.com/Files-com/files-sdk-go/v3/bundle"
-	"github.com/Files-com/files-sdk-go/v3/file"
-	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
-	"github.com/Files-com/files-sdk-go/v3/folder"
-	"github.com/Files-com/files-sdk-go/v3/session"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/pacer"
-)
-
-/*
-Run of rclone info
-stringNeedsEscaping = []rune{
-	'/', '\x00'
-}
-maxFileLength = 512 // for 1 byte unicode characters
-maxFileLength = 512 // for 2 byte unicode characters
-maxFileLength = 512 // for 3 byte unicode characters
-maxFileLength = 512 // for 4 byte unicode characters
-canWriteUnnormalized = true
-canReadUnnormalized = true
-canReadRenormalized = true
-canStream = true
-*/
-
-const (
-	minSleep      = 10 * time.Millisecond
-	maxSleep      = 2 * time.Second
-	decayConstant = 2 // bigger for slower decay, exponential
-
-	folderNotEmpty = "processing-failure/folder-not-empty"
-)
-
-// Register with Fs
-func init() {
-	fs.Register(&fs.RegInfo{
-		Name:        "filescom",
-		Description: "Files.com",
-		NewFs:       NewFs,
-		Options: []fs.Option{
-			{
-				Name: "site",
-				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
-			}, {
-				Name: "username",
-				Help: "The username used to authenticate with Files.com.",
-			}, {
-				Name:       "password",
-				Help:       "The password used to authenticate with Files.com.",
-				IsPassword: true,
-			}, {
-				Name:      "api_key",
-				Help:      "The API key used to authenticate with Files.com.",
-				Advanced:  true,
-				Sensitive: true,
-			}, {
-				Name:     config.ConfigEncoding,
-				Help:     config.ConfigEncodingHelp,
-				Advanced: true,
-				Default: (encoder.Display |
-					encoder.EncodeBackSlash |
-					encoder.EncodeRightSpace |
-					encoder.EncodeRightCrLfHtVt |
-					encoder.EncodeInvalidUtf8),
-			}},
-	})
-}
-
-// Options defines the configuration for this backend
-type Options struct {
-	Site     string               `config:"site"`
-	Username string               `config:"username"`
-	Password string               `config:"password"`
-	APIKey   string               `config:"api_key"`
-	Enc      encoder.MultiEncoder `config:"encoding"`
-}
-
-// Fs represents a remote files.com server
-type Fs struct {
-	name            string                 // name of this remote
-	root            string                 // the path we are working on
-	opt             Options                // parsed options
-	features        *fs.Features           // optional features
-	fileClient      *file.Client           // the connection to the file API
-	folderClient    *folder.Client         // the connection to the folder API
-	migrationClient *file_migration.Client // the connection to the file migration API
-	bundleClient    *bundle.Client         // the connection to the bundle API
-	pacer           *fs.Pacer              // pacer for API calls
-}
-
-// Object describes a files object
-//
-// Will definitely have info but maybe not meta
-type Object struct {
-	fs       *Fs       // what this object is part of
-	remote   string    // The remote path
-	size     int64     // size of the object
-	crc32    string    // CRC32 of the object content
-	md5      string    // MD5 of the object content
-	mimeType string    // Content-Type of the object
-	modTime  time.Time // modification time of the object
-}
-
-// ------------------------------------------------------------
-
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
-	return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
-	return f.root
-}
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
-	return fmt.Sprintf("files root '%s'", f.root)
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-	return f.features
-}
-
-// Encode remote and turn it into an absolute path in the share
-func (f *Fs) absPath(remote string) string {
-	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
-}
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
-	429, // Too Many Requests.
-	500, // Internal Server Error
-	502, // Bad Gateway
-	503, // Service Unavailable
-	504, // Gateway Timeout
-	509, // Bandwidth Limit Exceeded
-}
-
-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
-
-	if apiErr, ok := err.(files_sdk.ResponseError); ok {
-		for _, e := range retryErrorCodes {
-			if apiErr.HttpCode == e {
-				fs.Debugf(nil, "Retrying API error %v", err)
-				return true, err
-			}
-		}
-	}
-
-	return fserrors.ShouldRetry(err), err
-}
-
-// readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
-	params := files_sdk.FileFindParams{
-		Path: f.absPath(path),
-	}
-
-	var file files_sdk.File
-	err = f.pacer.Call(func() (bool, error) {
-		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return &file, nil
-}
-
-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	// Parse config into Options struct
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-
-	root = strings.Trim(root, "/")
-
-	config, err := newClientConfig(ctx, opt)
-	if err != nil {
-		return nil, err
-	}
-
-	f := &Fs{
-		name:            name,
-		root:            root,
-		opt:             *opt,
-		fileClient:      &file.Client{Config: config},
-		folderClient:    &folder.Client{Config: config},
-		migrationClient: &file_migration.Client{Config: config},
-		bundleClient:    &bundle.Client{Config: config},
-		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-	}
-	f.features = (&fs.Features{
-		CaseInsensitive:          true,
-		CanHaveEmptyDirectories:  true,
-		ReadMimeType:             true,
-		DirModTimeUpdatesOnWrite: true,
-	}).Fill(ctx, f)
-
-	if f.root != "" {
-		info, err := f.readMetaDataForPath(ctx, "")
-		if err == nil && !info.IsDir() {
-			f.root = path.Dir(f.root)
-			if f.root == "." {
-				f.root = ""
-			}
-			return f, fs.ErrorIsFile
-		}
-	}
-
-	return f, err
-}
-
-func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
-	if opt.Site != "" {
-		if strings.Contains(opt.Site, ".") {
-			config.EndpointOverride = opt.Site
-		} else {
-			config.Subdomain = opt.Site
-		}
-
-		_, err = url.ParseRequestURI(config.Endpoint())
-		if err != nil {
-			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
-			return
-		}
-	}
-
-	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
-
-	if opt.APIKey != "" {
-		config.APIKey = opt.APIKey
-		return
-	}
-
-	if opt.Username == "" {
-		err = errors.New("username not found")
-		return
-	}
-	if opt.Password == "" {
-		err = errors.New("password not found")
-		return
-	}
-	opt.Password, err = obscure.Reveal(opt.Password)
-	if err != nil {
-		return
-	}
-
-	sessionClient := session.Client{Config: config}
-	params := files_sdk.SessionCreateParams{
-		Username: opt.Username,
-		Password: opt.Password,
-	}
-
-	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
-	if err != nil {
-		err = fmt.Errorf("couldn't create session: %w", err)
-		return
-	}
-
-	config.SessionId = thisSession.Id
-	return
-}
-
-// Return an Object from a path
-//
-// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
-	o := &Object{
-		fs:     f,
-		remote: remote,
-	}
-	var err error
-	if file != nil {
-		err = o.setMetaData(file)
-	} else {
-		err = o.readMetaData(ctx) // reads info and meta, returning an error
-	}
-	if err != nil {
-		return nil, err
-	}
-	return o, nil
-}
-
-// NewObject finds the Object at remote. If it can't be found
-// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(ctx, remote, nil)
-}
-
-// List the objects and directories in dir into entries. The
-// entries can be returned in any order but should be for a
-// complete directory.
-//
-// dir should be "" to list the root, and should not have
-// trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	var it *folder.Iter
-	params := files_sdk.FolderListForParams{
-		Path: f.absPath(dir),
-	}
-
-	err = f.pacer.Call(func() (bool, error) {
-		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, fmt.Errorf("couldn't list files: %w", err)
-	}
-
-	for it.Next() {
-		item := ptr(it.File())
-		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
-		remote = path.Join(dir, remote)
-		if remote == dir {
-			continue
-		}
-
-		if item.IsDir() {
-			d := fs.NewDir(remote, item.ModTime())
-			entries = append(entries, d)
-		} else {
-			o, err := f.newObjectWithInfo(ctx, remote, item)
-			if err != nil {
-				return nil, err
-			}
-			entries = append(entries, o)
-		}
-	}
-	err = it.Err()
-	if files_sdk.IsNotExist(err) {
-		return nil, fs.ErrorDirNotFound
-	}
-	return
-}
-
-// Creates from the parameters passed in a half finished Object which
-// must have setMetaData called on it
-//
-// Returns the object and error.
-//
-// Used to create new objects
-func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
-	// Create the directory for the object if it doesn't exist
-	err = f.mkParentDir(ctx, remote)
-	if err != nil {
-		return
-	}
-	// Temporary Object under construction
-	o = &Object{
-		fs:     f,
-		remote: remote,
-	}
-	return o, nil
-}
-
-// Put the object
-//
-// Copy the reader in to the new object which is returned.
-//
-// The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	// Temporary Object under construction
-	fs := &Object{
-		fs:     f,
-		remote: src.Remote(),
-	}
-	return fs, fs.Update(ctx, in, src, options...)
-}
-
-// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
-}
-
-func (f *Fs) mkdir(ctx context.Context, path string) error {
-	if path == "" || path == "." {
-		return nil
-	}
-
-	params := files_sdk.FolderCreateParams{
-		Path:         path,
-		MkdirParents: ptr(true),
-	}
-
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if files_sdk.IsExist(err) {
-		return nil
-	}
-	return err
-}
-
-// Make the parent directory of remote
-func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
-	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
-}
-
-// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	return f.mkdir(ctx, f.absPath(dir))
-}
-
-// DirSetModTime sets the directory modtime for dir
-func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
-	o := Object{
-		fs:     f,
-		remote: dir,
-	}
-	return o.SetModTime(ctx, modTime)
-}
-
-// purgeCheck removes the root directory, if check is set then it
-// refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
-	path := f.absPath(dir)
-	if path == "" || path == "." {
-		return errors.New("can't purge root directory")
-	}
-
-	params := files_sdk.FileDeleteParams{
-		Path:      path,
-		Recursive: ptr(!check),
-	}
-
-	err := f.pacer.Call(func() (bool, error) {
-		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
-		// Allow for eventual consistency deletion of child objects.
-		if isFolderNotEmpty(err) {
-			return true, err
-		}
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		if files_sdk.IsNotExist(err) {
-			return fs.ErrorDirNotFound
-		} else if isFolderNotEmpty(err) {
-			return fs.ErrorDirectoryNotEmpty
-		}
-
-		return fmt.Errorf("rmdir failed: %w", err)
-	}
-	return nil
-}
-
-// Rmdir deletes the root folder
-//
-// Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, true)
-}
-
-// Precision return the precision of this Fs
-func (f *Fs) Precision() time.Duration {
-	return time.Second
-}
-
-// Copy src to this remote using server-side copy operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
-	srcObj, ok := src.(*Object)
-	if !ok {
-		fs.Debugf(src, "Can't copy - not same remote type")
-		return nil, fs.ErrorCantCopy
-	}
-	err = srcObj.readMetaData(ctx)
-	if err != nil {
-		return
-	}
-
-	srcPath := srcObj.fs.absPath(srcObj.remote)
-	dstPath := f.absPath(remote)
-	if strings.EqualFold(srcPath, dstPath) {
-		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
-	}
-
-	// Create temporary object
-	dstObj, err = f.createObject(ctx, remote)
-	if err != nil {
-		return
-	}
-
-	// Copy the object
-	params := files_sdk.FileCopyParams{
-		Path:        srcPath,
-		Destination: dstPath,
-		Overwrite:   ptr(true),
-	}
-
-	var action files_sdk.FileAction
-	err = f.pacer.Call(func() (bool, error) {
-		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return
-	}
-
-	err = f.waitForAction(ctx, action, "copy")
-	if err != nil {
-		return
-	}
-
-	err = dstObj.SetModTime(ctx, srcObj.modTime)
-	return
-}
-
-// Purge deletes all the files and the container
-//
-// Optional interface: Only implement this if you have a way of
-// deleting all the files quicker than just running Remove() on the
-// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, false)
-}
-
-// move a file or folder
-func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
-	// Move the object
-	params := files_sdk.FileMoveParams{
-		Path:        src.absPath(srcRemote),
-		Destination: f.absPath(dstRemote),
-	}
-
-	var action files_sdk.FileAction
-	err = f.pacer.Call(func() (bool, error) {
-		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.waitForAction(ctx, action, "move")
-	if err != nil {
-		return nil, err
-	}
-
-	info, err = f.readMetaDataForPath(ctx, dstRemote)
-	return
-}
-
-func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
-	var migration files_sdk.FileMigration
-	err = f.pacer.Call(func() (bool, error) {
-		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
-			// noop
-		}, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-	if err == nil && migration.Status != "completed" {
-		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
-	}
-	return
-}
-
-// Move src to this remote using server-side move operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*Object)
-	if !ok {
-		fs.Debugf(src, "Can't move - not same remote type")
-		return nil, fs.ErrorCantMove
-	}
-
-	// Create temporary object
-	dstObj, err := f.createObject(ctx, remote)
-	if err != nil {
-		return nil, err
-	}
-
-	// Do the move
-	info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
-	if err != nil {
-		return nil, err
-	}
-
-	err = dstObj.setMetaData(info)
-	if err != nil {
-		return nil, err
-	}
-	return dstObj, nil
-}
-
-// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantDirMove
-//
-// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
-	srcFs, ok := src.(*Fs)
-	if !ok {
-		fs.Debugf(srcFs, "Can't move directory - not same remote type")
-		return fs.ErrorCantDirMove
-	}
-
-	// Check if destination exists
-	_, err = f.readMetaDataForPath(ctx, dstRemote)
-	if err == nil {
-		return fs.ErrorDirExists
-	}
-
-	// Create temporary object
-	dstObj, err := f.createObject(ctx, dstRemote)
-	if err != nil {
-		return
-	}
-
-	// Do the move
-	_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
-	return
-}
-
-// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
-	params := files_sdk.BundleCreateParams{
-		Paths: []string{f.absPath(remote)},
-	}
-	if expire < fs.DurationOff {
-		params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
-	}
-
-	var bundle files_sdk.Bundle
-	err = f.pacer.Call(func() (bool, error) {
-		bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
-		return shouldRetry(ctx, err)
-	})
-
-	url = bundle.Url
-	return
-}
-
-// Hashes returns the supported hash sets.
-func (f *Fs) Hashes() hash.Set {
-	return hash.NewHashSet(hash.CRC32, hash.MD5)
-}
-
-// ------------------------------------------------------------
-
-// Fs returns the parent Fs
-func (o *Object) Fs() fs.Info {
-	return o.fs
-}
-
-// Return a string version
-func (o *Object) String() string {
-	if o == nil {
-		return "<nil>"
-	}
-	return o.remote
-}
-
-// Remote returns the remote path
-func (o *Object) Remote() string {
-	return o.remote
-}
-
-// Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	switch t {
-	case hash.CRC32:
-		if o.crc32 == "" {
-			return "", nil
-		}
-		return fmt.Sprintf("%08s", o.crc32), nil
-	case hash.MD5:
-		return o.md5, nil
-	}
-	return "", hash.ErrUnsupported
-}
-
-// Size returns the size of an object in bytes
-func (o *Object) Size() int64 {
-	return o.size
-}
-
-// setMetaData sets the metadata from info
-func (o *Object) setMetaData(file *files_sdk.File) error {
-	o.modTime = file.ModTime()
-
-	if !file.IsDir() {
-		o.size = file.Size
-		o.crc32 = file.Crc32
-		o.md5 = file.Md5
-		o.mimeType = file.MimeType
-	}
-
-	return nil
-}
-
-// readMetaData gets the metadata if it hasn't already been fetched
-//
-// it also sets the info
-func (o *Object) readMetaData(ctx context.Context) (err error) {
-	file, err := o.fs.readMetaDataForPath(ctx, o.remote)
-	if err != nil {
-		if files_sdk.IsNotExist(err) {
-			return fs.ErrorObjectNotFound
-		}
-		return err
-	}
-	if file.IsDir() {
-		return fs.ErrorIsDir
-	}
-	return o.setMetaData(file)
-}
-
-// ModTime returns the modification time of the object
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||
params := files_sdk.FileUpdateParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
ProvidedMtime: &modTime,
|
||||
}
|
||||
|
||||
var file files_sdk.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(&file)
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset, count int64
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
params := files_sdk.FileDownloadParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.fileClient.Download(
|
||||
params,
|
||||
files_sdk.WithContext(ctx),
|
||||
files_sdk.RequestHeadersOption(headers),
|
||||
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
||||
in = closer
|
||||
return err
|
||||
}),
|
||||
)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a pointer to t - useful for returning pointers to constants
|
||||
func ptr[T any](t T) *T {
|
||||
return &t
|
||||
}
|
||||
|
||||
func isFolderNotEmpty(err error) bool {
|
||||
var re files_sdk.ResponseError
|
||||
ok := errors.As(err, &re)
|
||||
return ok && re.Type == folderNotEmpty
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
//
|
||||
// The new object may have been created if an error is returned.
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
uploadOpts := []file.UploadOption{
|
||||
file.UploadWithContext(ctx),
|
||||
file.UploadWithReader(in),
|
||||
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
||||
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Upload(uploadOpts...)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := files_sdk.FileDeleteParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
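The generic `ptr` helper deleted above exists because Go cannot take the address of a literal (`&true` does not compile), while the files.com SDK uses pointer fields for optional parameters. A standalone sketch of the same pattern, independent of the rclone code:

```go
package main

import "fmt"

// ptr returns a pointer to any value, mirroring the helper in the diff above.
func ptr[T any](t T) *T {
	return &t
}

type CopyParams struct {
	Overwrite *bool // optional tri-state flag: nil means "unset"
}

func main() {
	p := CopyParams{Overwrite: ptr(true)} // &true would be a compile error
	fmt.Println(*p.Overwrite)             // prints: true
}
```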
@ -1,17 +0,0 @@
// Test Files filesystem interface
package filescom_test

import (
	"testing"

	"github.com/rclone/rclone/backend/filescom"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFilesCom:",
		NilObject:  (*filescom.Object)(nil),
	})
}
File diff suppressed because it is too large
@ -1,16 +0,0 @@
package frostfs

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFrostFS:",
		NilObject:       (*Object)(nil),
		SkipInvalidUTF8: true,
	})
}
@ -1,326 +0,0 @@
package frostfs

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	resolver "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/wallet"

	"github.com/rclone/rclone/fs"
)

type endpointInfo struct {
	Address  string
	Priority int
	Weight   float64
}

func publicReadWriteCCPRules() []chain.Rule {
	return []chain.Rule{
		{
			Status: chain.Allow, Actions: chain.Actions{
				Inverted: false,
				Names: []string{
					native.MethodPutObject,
					native.MethodGetObject,
					native.MethodHeadObject,
					native.MethodDeleteObject,
					native.MethodSearchObject,
					native.MethodRangeObject,
					native.MethodHashObject,
					native.MethodPatchObject,
				},
			}, Resources: chain.Resources{
				Inverted: false,
				Names:    []string{native.ResourceFormatRootObjects},
			}, Any: false},
	}
}

func privateCCPRules() []chain.Rule {
	rule := publicReadWriteCCPRules()
	// The same as public-read-write, except that only the owner is allowed to perform the listed actions
	rule[0].Condition = []chain.Condition{
		{
			Op:    chain.CondStringEquals,
			Kind:  chain.KindRequest,
			Key:   native.PropertyKeyActorRole,
			Value: native.PropertyValueContainerRoleOwner,
		},
	}
	return rule
}

func publicReadCCPRules() []chain.Rule {
	rule := privateCCPRules()
	// Add a rule that allows other users to perform reading actions.
	rule = append(rule, chain.Rule{
		Status: chain.Allow, Actions: chain.Actions{
			Inverted: false,
			Names: []string{
				native.MethodGetObject,
				native.MethodHeadObject,
				native.MethodRangeObject,
				native.MethodHashObject,
				native.MethodSearchObject,
			},
		}, Resources: chain.Resources{
			Inverted: false,
			Names:    []string{native.ResourceFormatRootObjects},
		}, Condition: []chain.Condition{
			{
				Op:    chain.CondStringEquals,
				Kind:  chain.KindRequest,
				Key:   native.PropertyKeyActorRole,
				Value: native.PropertyValueContainerRoleOthers,
			},
		}, Any: false})
	return rule
}

func parseContainerCreationPolicyString(policyString string) ([]chain.Rule, error) {
	switch policyString {
	case "private":
		return privateCCPRules(), nil
	case "public-read":
		return publicReadCCPRules(), nil
	case "public-read-write":
		return publicReadWriteCCPRules(), nil
	}
	return nil, fmt.Errorf("invalid container creation policy: %s", policyString)
}

func parseEndpoints(endpointParam string) ([]endpointInfo, error) {
	var err error
	expectedLength := -1 // to make sure all endpoints have the same format

	endpoints := strings.Split(strings.TrimSpace(endpointParam), " ")
	res := make([]endpointInfo, 0, len(endpoints))
	seen := make(map[string]struct{}, len(endpoints))

	for _, endpoint := range endpoints {
		endpointInfoSplit := strings.Split(endpoint, ",")
		address := endpointInfoSplit[0]

		if len(address) == 0 {
			continue
		}
		if _, ok := seen[address]; ok {
			return nil, fmt.Errorf("endpoint '%s' is already defined", address)
		}
		seen[address] = struct{}{}

		epInfo := endpointInfo{
			Address:  address,
			Priority: 1,
			Weight:   1,
		}

		if expectedLength == -1 {
			expectedLength = len(endpointInfoSplit)
		}

		if len(endpointInfoSplit) != expectedLength {
			return nil, fmt.Errorf("all endpoints must have the same format: '%s'", endpointParam)
		}

		switch len(endpointInfoSplit) {
		case 1:
		case 2:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		case 3:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}

			epInfo.Weight, err = parseWeight(endpointInfoSplit[2])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		default:
			return nil, fmt.Errorf("invalid endpoint format '%s'", endpoint)
		}

		res = append(res, epInfo)
	}

	return res, nil
}

func parsePriority(priorityStr string) (int, error) {
	priority, err := strconv.Atoi(priorityStr)
	if err != nil {
		return 0, fmt.Errorf("invalid priority '%s': %w", priorityStr, err)
	}
	if priority <= 0 {
		return 0, fmt.Errorf("priority must be positive '%s'", priorityStr)
	}

	return priority, nil
}

func parseWeight(weightStr string) (float64, error) {
	weight, err := strconv.ParseFloat(weightStr, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid weight '%s': %w", weightStr, err)
	}
	if weight <= 0 {
		return 0, fmt.Errorf("weight must be positive '%s'", weightStr)
	}

	return weight, nil
}

func createPool(ctx context.Context, key *keys.PrivateKey, cfg *Options) (*pool.Pool, error) {
	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	prm.SetNodeDialTimeout(time.Duration(cfg.FrostfsConnectionTimeout))
	prm.SetHealthcheckTimeout(time.Duration(cfg.FrostfsRequestTimeout))
	prm.SetClientRebalanceInterval(time.Duration(cfg.FrostfsRebalanceInterval))
	prm.SetSessionExpirationDuration(cfg.FrostfsSessionExpiration)

	nodes, err := getNodePoolParams(cfg.FrostfsEndpoint)
	if err != nil {
		return nil, err
	}
	for _, node := range nodes {
		prm.AddNode(node)
	}

	p, err := pool.NewPool(prm)
	if err != nil {
		return nil, fmt.Errorf("create pool: %w", err)
	}

	if err = p.Dial(ctx); err != nil {
		return nil, fmt.Errorf("dial pool: %w", err)
	}

	return p, nil
}

func getNodePoolParams(endpointParam string) ([]pool.NodeParam, error) {
	endpointInfos, err := parseEndpoints(endpointParam)
	if err != nil {
		return nil, fmt.Errorf("parse endpoints params: %w", err)
	}

	res := make([]pool.NodeParam, len(endpointInfos))
	for i, info := range endpointInfos {
		res[i] = pool.NewNodeParam(info.Priority, info.Address, info.Weight)
	}

	return res, nil
}

func createNNSResolver(cfg *Options) (*resolver.NNS, error) {
	if cfg.RPCEndpoint == "" {
		return nil, nil
	}

	var nns resolver.NNS
	if err := nns.Dial(cfg.RPCEndpoint); err != nil {
		return nil, fmt.Errorf("dial NNS resolver: %w", err)
	}

	return &nns, nil
}

func getAccount(cfg *Options) (*wallet.Account, error) {
	w, err := wallet.NewWalletFromFile(cfg.Wallet)
	if err != nil {
		return nil, err
	}

	addr := w.GetChangeAddress()
	if cfg.Address != "" {
		addr, err = flags.ParseAddress(cfg.Address)
		if err != nil {
			return nil, fmt.Errorf("invalid address")
		}
	}
	acc := w.GetAccount(addr)
	err = acc.Decrypt(cfg.Password, w.Scrypt)
	if err != nil {
		return nil, err
	}

	return acc, nil
}

func newAddress(cnrID cid.ID, objID oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnrID)
	addr.SetObject(objID)
	return addr
}

func formObject(own *user.ID, cnrID cid.ID, name string, header map[string]string) *object.Object {
	attributes := make([]object.Attribute, 0, 1+len(header))
	filename := object.NewAttribute()
	filename.SetKey(object.AttributeFileName)
	filename.SetValue(name)

	attributes = append(attributes, *filename)

	for key, val := range header {
		attr := object.NewAttribute()
		attr.SetKey(key)
		attr.SetValue(val)
		attributes = append(attributes, *attr)
	}

	obj := object.New()
	obj.SetOwnerID(*own)
	obj.SetContainerID(cnrID)
	obj.SetAttributes(attributes...)

	return obj
}

func newDir(cnrID cid.ID, cnr container.Container, defaultZone string) *fs.Dir {
	remote := cnrID.EncodeToString()
	timestamp := container.CreatedAt(cnr)

	if domain := container.ReadDomain(cnr); domain.Name() != "" {
		if defaultZone != domain.Zone() {
			remote = domain.Name() + "." + domain.Zone()
		} else {
			remote = domain.Name()
		}
	}

	dir := fs.NewDir(remote, timestamp)
	dir.SetID(cnrID.String())
	return dir
}

func getContainerNameAndZone(containerStr, defaultZone string) (cnrName string, cnrZone string) {
	defer func() {
		if len(cnrZone) == 0 {
			cnrZone = defaultZone
		}
	}()
	if idx := strings.Index(containerStr, "."); idx >= 0 {
		return containerStr[:idx], containerStr[idx+1:]
	}
	return containerStr, defaultZone
}
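For readers unfamiliar with FrostFS naming: `getContainerNameAndZone` above splits a remote like `mycontainer.myzone` on the first dot, and an empty zone falls back to the configured default. A standalone sketch of the same rule (the function name here is illustrative, not part of the backend):

```go
package main

import (
	"fmt"
	"strings"
)

// splitNameZone applies the same split rule as getContainerNameAndZone above:
// everything before the first "." is the container name, everything after it
// is the zone, and an empty zone falls back to the default.
func splitNameZone(containerStr, defaultZone string) (name, zone string) {
	name = containerStr
	if idx := strings.Index(containerStr, "."); idx >= 0 {
		name, zone = containerStr[:idx], containerStr[idx+1:]
	}
	if zone == "" {
		zone = defaultZone
	}
	return name, zone
}

func main() {
	fmt.Println(splitNameZone("cnr_name", "def_zone"))  // cnr_name def_zone
	fmt.Println(splitNameZone(".cnr_zone", "def_zone")) // "" cnr_zone
	fmt.Println(splitNameZone("cnr_name.", "def_zone")) // cnr_name def_zone
}
```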
@ -1,205 +0,0 @@
package frostfs

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetZoneAndContainerNames(t *testing.T) {
	for i, tc := range []struct {
		cnrStr       string
		defZone      string
		expectedName string
		expectedZone string
	}{
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       "cnr_name.",
			defZone:      "def_zone",
			expectedName: "cnr_name",
			expectedZone: "def_zone",
		},
		{
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		}, {
			cnrStr:       ".cnr_zone",
			defZone:      "def_zone",
			expectedName: "",
			expectedZone: "cnr_zone",
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			actualName, actualZone := getContainerNameAndZone(tc.cnrStr, tc.defZone)
			require.Equal(t, tc.expectedZone, actualZone)
			require.Equal(t, tc.expectedName, actualName)
		})
	}
}

func TestParseContainerCreationPolicy(t *testing.T) {
	for i, tc := range []struct {
		ACLString     string
		ExpectedError bool
	}{
		{
			ACLString:     "",
			ExpectedError: true,
		},
		{
			ACLString:     "public-ready",
			ExpectedError: true,
		},
		{
			ACLString:     "public-read",
			ExpectedError: false,
		},
		{
			ACLString:     "public-read-write",
			ExpectedError: false,
		},
		{
			ACLString:     "private",
			ExpectedError: false,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			rules, err := parseContainerCreationPolicyString(tc.ACLString)
			if tc.ExpectedError {
				require.Error(t, err)
				require.Nil(t, rules)
			} else {
				require.NoError(t, err)
				require.NotNil(t, rules)
			}
		})
	}
}

func TestParseEndpoints(t *testing.T) {
	for i, tc := range []struct {
		EndpointsParam string
		ExpectedError  bool
		ExpectedResult []endpointInfo
	}{
		{
			EndpointsParam: "s01.frostfs.devenv:8080",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 1,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2,3",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   3,
			}},
		},
		{
			EndpointsParam: " s01.frostfs.devenv:8080 s02.frostfs.devenv:8080 ",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 2,
					Weight:   1,
				},
				{
					Address:  "s03.frostfs.devenv:8080",
					Priority: 2,
					Weight:   9,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,-1,1",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,,",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,sd,sd",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,0",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080,1",
			ExpectedError:  true,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			res, err := parseEndpoints(tc.EndpointsParam)
			if tc.ExpectedError {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.ExpectedResult, res)
		})
	}
}
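The tests above pin down the endpoint grammar: space-separated entries of the form `address[,priority[,weight]]`, with a positive priority, a positive weight, and the same arity for every entry. A simplified single-entry parser sketching the same rules (the function name is illustrative; the real `parseEndpoints` additionally enforces uniform arity and rejects duplicate addresses):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOne handles one "address[,priority[,weight]]" endpoint string.
func parseOne(endpoint string) (addr string, prio int, weight float64, err error) {
	parts := strings.Split(endpoint, ",")
	addr, prio, weight = parts[0], 1, 1 // priority and weight default to 1
	if len(parts) >= 2 {
		if prio, err = strconv.Atoi(parts[1]); err != nil || prio <= 0 {
			return "", 0, 0, fmt.Errorf("invalid priority %q", parts[1])
		}
	}
	if len(parts) == 3 {
		if weight, err = strconv.ParseFloat(parts[2], 64); err != nil || weight <= 0 {
			return "", 0, 0, fmt.Errorf("invalid weight %q", parts[2])
		}
	}
	return addr, prio, weight, nil
}

func main() {
	fmt.Println(parseOne("s01.frostfs.devenv:8080,2,3")) // s01.frostfs.devenv:8080 2 3 <nil>
}
```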
@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
			Default: false,
		}, {
			Name: "concurrency",
			Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
			Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.
@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`"),
`, "|", "`", -1),
			Default:  0,
			Advanced: true,
		}, {
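The only change in this hunk is swapping `strings.ReplaceAll` for the older `strings.Replace` with a count of -1. The two calls are equivalent: since Go 1.12, `ReplaceAll(s, old, new)` is defined as `Replace(s, old, new, -1)`, so this is purely a compatibility downgrade, as the check below shows:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "use |--checkers| and |--transfers|"
	a := strings.ReplaceAll(s, "|", "`")
	b := strings.Replace(s, "|", "`", -1) // -1 means "replace every occurrence"
	fmt.Println(a == b)                   // true
}
```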
@ -1,311 +0,0 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

// Error is returned from gofile when things go wrong
type Error struct {
	Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Status)
	return out
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
	if err != nil {
		return err
	}
	if e.IsError() {
		return e
	}
	return nil
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"`
	ModTime       int64                  `json:"modTime"`
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public             bool     `json:"public"`
	//ServerSelected     string   `json:"serverSelected"`
	//Thumbnail          string   `json:"thumbnail"`
	//DownloadCount      int      `json:"downloadCount"`
	//TotalDownloadCount int64    `json:"totalDownloadCount"`
	//TotalSize          int64    `json:"totalSize"`
	//ChildrenIDs        []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
	return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
	return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"`
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"`
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"`
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
	ParentFolderID string `json:"parentFolderId"`
	FolderName     string `json:"folderName"`
	ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
	Error
	Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is the output from DELETE /contents
type DeleteResponse struct {
	Error
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
	Error
	Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"`
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"`
	} `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define :
// For Attribute "name" : The name of the content (file or folder)
// For Attribute "description" : The description displayed on the download page (folder only)
// For Attribute "tags" : A comma-separated list of tags (folder only)
// For Attribute "public" : either true or false (folder only)
// For Attribute "expiry" : A unix timestamp of the expiration date (folder only)
// For Attribute "password" : The password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}
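The `api.Time` type at the top of this file uses a small trick worth calling out: it bakes the JSON quotes into the layout string (`timeFormat = `"` + time.RFC3339 + `"``) so the raw JSON bytes can be formatted and parsed directly, quotes included. A minimal standalone sketch of the same pattern; the value receiver on MarshalJSON is deliberate so json.Marshal applies it to struct fields:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Quotes are literal characters in a time layout, so this round-trips
// a quoted JSON string without any manual trimming.
const timeFormat = `"` + time.RFC3339 + `"`

type Time time.Time

func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(time.Time(t).Format(timeFormat)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	var v struct {
		CreateTime Time `json:"createTime"`
	}
	if err := json.Unmarshal([]byte(`{"createTime":"2017-05-03T07:26:10-07:00"}`), &v); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // {"createTime":"2017-05-03T07:26:10-07:00"}
}
```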
File diff suppressed because it is too large
@ -1,17 +0,0 @@
// Test Gofile filesystem interface
package gofile_test

import (
	"testing"

	"github.com/rclone/rclone/backend/gofile"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoFile:",
		NilObject:  (*gofile.Object)(nil),
	})
}
@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
	var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
	var path = strings.Replace(resultURL, endpoint, "", 1)

	path += expires
	path = path + expires
	mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
	mac.Write([]byte(path))
	signature := hex.EncodeToString(mac.Sum(nil))
@ -1555,7 +1555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}
	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

	if err == nil && meta != nil {
	if err != nil && meta != nil {
		createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
		if !createTimeMeta {
			createTime = srcObj.createTime
@ -1,93 +0,0 @@
//go:build darwin && cgo

// Package local provides a filesystem interface
package local

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"

	"github.com/go-darwin/apfs"
	"github.com/rclone/rclone/fs"
)

// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	if runtime.GOOS != "darwin" || f.opt.NoClone {
		return nil, fs.ErrorCantCopy
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't clone - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
		return nil, fs.ErrorCantCopy
	}

	// Fetch metadata if --metadata is in use
	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
	if err != nil {
		return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
	}

	// Create destination
	dstObj := f.newObject(remote)
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}

	srcPath := srcObj.path
	if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
		srcPath, err = filepath.EvalSymlinks(srcPath)
		if err != nil {
			return nil, err
		}
	}

	err = Clone(srcPath, f.localPath(remote))
	if err != nil {
		return nil, err
	}

	// Set metadata if --metadata is in use
	if meta != nil {
		err = dstObj.writeMetadata(meta)
		if err != nil {
			return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
		}
	}

	return f.NewObject(ctx, remote)
}

// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
	state := apfs.CopyFileStateAlloc()
	defer func() {
		if err := apfs.CopyFileStateFree(state); err != nil {
			fs.Errorf(dst, "free state error: %v", err)
		}
	}()
	cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
	fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Copier = &Fs{}
)
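The comment on `Clone` above points at the related raw syscall, clonefile(2), which creates a copy-on-write clone on APFS. A hedged sketch of calling it directly via golang.org/x/sys/unix on macOS (file names are illustrative); unlike the apfs.CopyFile wrapper used in the diff, the bare syscall fails rather than falling back to a regular copy:

```go
//go:build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// clonefile(2): dst must not exist; fails with e.g. EEXIST or ENOTSUP
	// (on non-APFS volumes) instead of degrading to a byte copy.
	if err := unix.Clonefile("src.txt", "dst.txt", unix.CLONE_NOFOLLOW); err != nil {
		fmt.Println("clone failed:", err)
		return
	}
	fmt.Println("cloned src.txt -> dst.txt (blocks shared until modified)")
}
```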
@ -1,16 +0,0 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// Can't do this safely on this OS - chmoding a symlink always
	// changes the destination.
	return nil
}
@ -1,41 +0,0 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
	// and returns ENOTSUP if you try, so we don't support this on linux
	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lChmod", Path: name, Err: e}
	}
	return nil
}
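The `syscallMode` translation above exists because Go keeps setuid, setgid, and sticky in high bits of `os.FileMode`, while the syscall layer expects the traditional `S_ISUID`/`S_ISGID`/`S_ISVTX` octal bits. A small check of that mapping, reusing the same function body:

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// syscallMode maps Go's portable mode bits onto the raw syscall bits,
// exactly as in the deleted file above.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID // 0o4000
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID // 0o2000
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX // 0o1000
	}
	return o
}

func main() {
	m := os.FileMode(0o755) | os.ModeSetuid
	fmt.Printf("portable: %v -> syscall bits: %04o\n", m, syscallMode(m)) // 4755
}
```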
@ -1,4 +1,4 @@
//go:build plan9 || js
//go:build windows || plan9 || js

package local

@ -1,19 +0,0 @@
//go:build windows

package local

import (
	"time"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	return setTimes(name, atime, mtime, time.Time{}, true)
}
@ -32,11 +32,9 @@ import (
)

// Constants
const (
	devUnset   = 0xdeadbeefcafebabe                                   // a device id meaning it is unset
	linkSuffix = ".rclonelink"                                        // The suffix added to a translated symbolic link
	useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink"    // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
@ -80,46 +78,41 @@ supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
		},
		Options: []fs.Option{
			{
				Name:     "nounc",
				Help:     "Disable UNC (long path names) conversion on Windows.",
				Default:  false,
				Advanced: runtime.GOOS != "windows",
				Examples: []fs.OptionExample{{
					Value: "true",
					Help:  "Disables long file names.",
				}},
			},
			{
				Name:     "copy_links",
				Help:     "Follow symlinks and copy the pointed to item.",
				Default:  false,
				NoPrefix: true,
				ShortOpt: "L",
				Advanced: true,
			},
			{
				Name:     "links",
				Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
				Default:  false,
				NoPrefix: true,
				ShortOpt: "l",
				Advanced: true,
			},
			{
				Name: "skip_links",
				Help: `Don't warn about skipped symlinks.
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows.",
			Default:  false,
			Advanced: runtime.GOOS != "windows",
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names.",
			}},
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "L",
			Advanced: true,
		}, {
			Name:     "links",
			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "l",
			Advanced: true,
		}, {
			Name: "skip_links",
			Help: `Don't warn about skipped symlinks.

This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
				Default:  false,
				NoPrefix: true,
				Advanced: true,
			},
			{
				Name: "zero_size_links",
				Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
			Default:  false,
			NoPrefix: true,
			Advanced: true,
		}, {
			Name: "zero_size_links",
			Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).

Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:

@ -129,12 +122,11 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu

So rclone now always reads the link.
`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "unicode_normalization",
				Help: `Apply unicode NFC normalization to paths and filenames.
			Default:  false,
			Advanced: true,
		}, {
			Name: "unicode_normalization",
			Help: `Apply unicode NFC normalization to paths and filenames.

This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.
@ -148,12 +140,11 @@ some OSes.

Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "no_check_updated",
				Help: `Don't check to see if the files change during upload.
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_check_updated",
			Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -
@ -184,96 +175,68 @@ directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
				Default:  false,
				Advanced: true,
			},
			{
				Name:     "one_file_system",
				Help:     "Don't cross filesystem boundaries (unix/macOS only).",
				Default:  false,
				NoPrefix: true,
				ShortOpt: "x",
				Advanced: true,
			},
			{
				Name: "case_sensitive",
				Help: `Force the filesystem to report itself as case sensitive.
			Default:  false,
			Advanced: true,
		}, {
			Name:     "one_file_system",
			Help:     "Don't cross filesystem boundaries (unix/macOS only).",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "x",
			Advanced: true,
		}, {
			Name: "case_sensitive",
			Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "case_insensitive",
				Help: `Force the filesystem to report itself as case insensitive.
			Default:  false,
			Advanced: true,
		}, {
			Name: "case_insensitive",
			Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "no_clone",
				Help: `Disable reflink cloning for server-side copies.

Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.

Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.

Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.

Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "no_preallocate",
				Help: `Disable preallocation of disk space for transferred files.
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_preallocate",
			Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "no_sparse",
				Help: `Disable sparse files for multi-thread downloads.
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_sparse",
			Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "no_set_modtime",
				Help: `Disable setting modtime.
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_set_modtime",
			Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
				Default:  false,
				Advanced: true,
			},
			{
				Name: "time_type",
				Help: `Set what kind of time is returned.
			Default:  false,
			Advanced: true,
		}, {
			Name: "time_type",
			Help: `Set what kind of time is returned.

Normally rclone does all operations on the mtime or Modification time.

@ -292,29 +255,27 @@ will silently replace it with the modification time which all OSes support.
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
				Default:  mTime,
				Advanced: true,
				Examples: []fs.OptionExample{{
					Value: mTime.String(),
					Help:  "The last modification time.",
				}, {
					Value: aTime.String(),
					Help:  "The last access time.",
				}, {
					Value: bTime.String(),
					Help:  "The creation time.",
				}, {
					Value: cTime.String(),
					Help:  "The last status change time.",
				}},
			},
			{
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default:  encoder.OS,
			},
		},
			Default:  mTime,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: mTime.String(),
				Help:  "The last modification time.",
			}, {
				Value: aTime.String(),
				Help:  "The last access time.",
			}, {
				Value: bTime.String(),
				Help:  "The creation time.",
			}, {
				Value: cTime.String(),
				Help:  "The last status change time.",
			}},
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default:  encoder.OS,
		}},
	}
	fs.Register(fsi)
}
@ -335,7 +296,6 @@ type Options struct {
	NoSetModTime bool                 `config:"no_set_modtime"`
	TimeType     timeType             `config:"time_type"`
	Enc          encoder.MultiEncoder `config:"encoding"`
	NoClone      bool                 `config:"no_clone"`
}

// Fs represents a local filesystem rooted at root
@ -424,10 +384,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}
	if opt.NoClone {
		// Disable server-side copy when --local-no-clone is set
		f.features.Copy = nil
	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
@ -73,6 +73,7 @@ func TestUpdatingCheck(t *testing.T) {
	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.NoError(t, err)

}

// Test corrupted on transfer

@ -223,7 +224,7 @@ func TestHashOnUpdate(t *testing.T) {
	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

	// Reupload it with different contents but same size and timestamp
	b := bytes.NewBufferString("CONTENT")
	var b = bytes.NewBufferString("CONTENT")
	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
	err = o.Update(ctx, b, src)
	require.NoError(t, err)

@ -268,66 +269,22 @@ func TestMetadata(t *testing.T) {
	r := fstest.NewRun(t)
	const filePath = "metafile.txt"
	when := time.Now()
	const dayLength = len("2001-01-01")
	whenRFC := when.Format(time.RFC3339Nano)
	r.WriteFile(filePath, "metadata file contents", when)
	f := r.Flocal.(*Fs)

	// Set fs into "-l" / "--links" mode
	f.opt.TranslateSymlinks = true

	// Write a symlink to the file
	symlinkPath := "metafile-link.txt"
	osSymlinkPath := filepath.Join(f.root, symlinkPath)
	symlinkPath += linkSuffix
	require.NoError(t, os.Symlink(filePath, osSymlinkPath))
	symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
	require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))

	// Get the object
	obj, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)
	o := obj.(*Object)

	// Get the symlink object
	symlinkObj, err := f.NewObject(ctx, symlinkPath)
	require.NoError(t, err)
	symlinkO := symlinkObj.(*Object)

	// Record metadata for o
	oMeta, err := o.Metadata(ctx)
	require.NoError(t, err)

	// Test symlink first to check it doesn't mess up file
	t.Run("Symlink", func(t *testing.T) {
		testMetadata(t, r, symlinkO, symlinkModTime)
	})

	// Read it again
	oMetaNew, err := o.Metadata(ctx)
	require.NoError(t, err)

	// Check that operating on the symlink didn't change the file it was pointing to
	// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
	assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")

	// Now run the same tests on the file
	t.Run("File", func(t *testing.T) {
		testMetadata(t, r, o, when)
	})
}

func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
	ctx := context.Background()
	whenRFC := when.Format(time.RFC3339Nano)
	const dayLength = len("2001-01-01")

	f := r.Flocal.(*Fs)
	features := f.Features()

	var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
	var hasXID, hasAtime, hasBtime bool
	switch runtime.GOOS {
	case "darwin", "freebsd", "netbsd", "linux":
		hasXID, hasAtime, hasBtime = true, true, true
		canSetXattrOnLinks = runtime.GOOS != "linux"
	case "openbsd", "solaris":
		hasXID, hasAtime = true, true
	case "windows":

@ -350,10 +307,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
	require.NoError(t, err)
	assert.Nil(t, m)

	if !canSetXattrOnLinks && o.translatedLink {
		t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
	}

	inM := fs.Metadata{
		"potato":  "chips",
		"cabbage": "soup",

@ -368,21 +321,18 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
	})

	checkTime := func(m fs.Metadata, key string, when time.Time) {
		t.Helper()
		mt, ok := o.parseMetadataTime(m, key)
		assert.True(t, ok)
		dt := mt.Sub(when)
		precision := time.Second
		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
	}

	checkInt := func(m fs.Metadata, key string, base int) int {
		t.Helper()
		value, ok := o.parseMetadataInt(m, key, base)
		assert.True(t, ok)
		return value
	}

	t.Run("Read", func(t *testing.T) {
		m, err := o.Metadata(ctx)
		require.NoError(t, err)

@ -392,12 +342,13 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
		checkInt(m, "mode", 8)
		checkTime(m, "mtime", when)

		assert.Equal(t, len(whenRFC), len(m["mtime"]))
		assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

		if hasAtime && !o.translatedLink { // symlinks generally don't record atime
		if hasAtime {
			checkTime(m, "atime", when)
		}
		if hasBtime && !o.translatedLink { // symlinks generally don't record btime
		if hasBtime {
			checkTime(m, "btime", when)
		}
		if hasXID {

@ -421,10 +372,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
			"mode":   "0767",
			"potato": "wedges",
		}
		if !canSetXattrOnLinks && o.translatedLink {
			// Don't change xattr if not supported on symlinks
			delete(newM, "potato")
		}
		err := o.writeMetadata(newM)
		require.NoError(t, err)

@ -434,11 +381,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {

		mode := checkInt(m, "mode", 8)
		if runtime.GOOS != "windows" {
			expectedMode := 0767
			if o.translatedLink && runtime.GOOS == "linux" {
				expectedMode = 0777 // perms of symlinks always read as 0777 on linux
			}
			assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
			assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
		}

		checkTime(m, "mtime", newMtime)

@ -448,10 +391,11 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
		if haveSetBTime {
			checkTime(m, "btime", newBtime)
		}
		if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
		if xattrSupported {
			assert.Equal(t, "wedges", m["potato"])
		}
	})

}

func TestFilter(t *testing.T) {

@ -628,35 +572,4 @@ func TestCopySymlink(t *testing.T) {
	linkContents, err := os.Readlink(dstPath)
	require.NoError(t, err)
	assert.Equal(t, "file.txt", linkContents)

	// Set fs into "-L/--copy-links" mode
	f.opt.FollowSymlinks = true
	f.opt.TranslateSymlinks = false
	f.lstat = os.Stat

	// Create dst
	require.NoError(t, f.Mkdir(ctx, "dst2"))

	// Do copy from src into dst
	src, err = f.NewObject(ctx, "src/link.txt")
	require.NoError(t, err)
	require.NotNil(t, src)
	dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
	require.NoError(t, err)
	require.NotNil(t, dst)

	// Test that we made a NON-symlink and it has the right contents
	dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
	fi, err := os.Lstat(dstPath)
	require.NoError(t, err)
	assert.True(t, fi.Mode()&os.ModeSymlink == 0)
	want := fstest.NewItem("dst2/link.txt", "hello world", when)
	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")

	// Test that copying a normal file also works
	dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
	require.NoError(t, err)
	require.NotNil(t, dst)
	want = fstest.NewItem("dst2/file.txt", "hello world", when)
	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}
@ -2,7 +2,6 @@ package local

import (
	"fmt"
	"math"
	"os"
	"runtime"
	"strconv"

@ -73,12 +72,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
	value, ok := m[key]
	if ok {
		var err error
		parsed, err := strconv.ParseInt(value, base, 0)
		result64, err := strconv.ParseInt(value, base, 64)
		if err != nil {
			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
			ok = false
		}
		result = int(parsed)
		result = int(result64)
	}
	return result, ok
}
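Note on the ParseInt change in this hunk: bitSize 0 makes strconv.ParseInt reject anything that does not fit the platform's native int (32 bits on 386/arm builds), while bitSize 64 always parses the full int64 range and leaves any narrowing to the explicit int(...) conversion. A standalone illustration of the difference (not rclone code):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Octal 40000000000 is 2^32: too big for a 32-bit int,
	// comfortably inside an int64.
	v, err := strconv.ParseInt("40000000000", 8, 64)
	fmt.Println(v, err) // 4294967296 <nil>

	// bitSize 0 means "must fit the native int": this succeeds on
	// 64-bit builds but fails with ErrRange on 32-bit ones.
	v0, err0 := strconv.ParseInt("40000000000", 8, 0)
	fmt.Println(v0, err0)
}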
@ -105,11 +104,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	}
	if haveSetBTime {
		if btimeOK {
			if o.translatedLink {
				err = lsetBTime(o.path, btime)
			} else {
				err = setBTime(o.path, btime)
			}
			err = setBTime(o.path, btime)
			if err != nil {
				outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
			}

@ -125,11 +120,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
		if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
			fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
		} else {
			if o.translatedLink {
				err = os.Lchown(o.path, uid, gid)
			} else {
				err = os.Chown(o.path, uid, gid)
			}
			err = os.Chown(o.path, uid, gid)
			if err != nil {
				outErr = fmt.Errorf("failed to change ownership: %w", err)
			}

@ -137,23 +128,9 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
	}
	mode, hasMode := o.parseMetadataInt(m, "mode", 8)
	if hasMode {
		if mode >= 0 {
			umode := uint(mode)
			if umode <= math.MaxUint32 {
				if o.translatedLink {
					if haveLChmod {
						err = lChmod(o.path, os.FileMode(umode))
					} else {
						fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
						err = nil
					}
				} else {
					err = os.Chmod(o.path, os.FileMode(umode))
				}
				if err != nil {
					outErr = fmt.Errorf("failed to change permissions: %w", err)
				}
			}
		}
		err = os.Chmod(o.path, os.FileMode(mode))
		if err != nil {
			outErr = fmt.Errorf("failed to change permissions: %w", err)
		}
	}
	// FIXME not parsing rdev yet
@ -13,9 +13,3 @@ func setBTime(name string, btime time.Time) error {
	// Does nothing
	return nil
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
	// Does nothing
	return nil
}
@ -9,20 +9,15 @@ import (

const haveSetBTime = true

// setTimes sets any of atime, mtime or btime
// if link is set it sets a link rather than the target
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
	pathp, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return err
	}
	fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
	if link {
		fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
	}
	h, err := syscall.CreateFile(pathp,
		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
		syscall.OPEN_EXISTING, fileFlag, 0)
		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if err != nil {
		return err
	}

@ -32,28 +27,6 @@ func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error)
			err = closeErr
		}
	}()
	var patime, pmtime, pbtime *syscall.Filetime
	if !atime.IsZero() {
		t := syscall.NsecToFiletime(atime.UnixNano())
		patime = &t
	}
	if !mtime.IsZero() {
		t := syscall.NsecToFiletime(mtime.UnixNano())
		pmtime = &t
	}
	if !btime.IsZero() {
		t := syscall.NsecToFiletime(btime.UnixNano())
		pbtime = &t
	}
	return syscall.SetFileTime(h, pbtime, patime, pmtime)
}

// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
	return setTimes(name, time.Time{}, time.Time{}, btime, false)
}

// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
	return setTimes(name, time.Time{}, time.Time{}, btime, true)
	bFileTime := syscall.NsecToFiletime(btime.UnixNano())
	return syscall.SetFileTime(h, &bFileTime, nil, nil)
}
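Note: in both variants above, syscall.SetFileTime takes the timestamps in the order (creation, last-access, last-write), and a nil pointer leaves that timestamp unchanged. A minimal Windows-only sketch of setting just the birth time on an already-open handle (illustrative; handle setup and error handling are elided):

//go:build windows

package main

import (
	"syscall"
	"time"
)

// touchBirthTime sets only the creation (birth) time of an open file
// handle, leaving access and write times alone by passing nil for them.
func touchBirthTime(h syscall.Handle, when time.Time) error {
	ft := syscall.NsecToFiletime(when.UnixNano())
	// SetFileTime(handle, creation, lastAccess, lastWrite)
	return syscall.SetFileTime(h, &ft, nil, nil)
}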
@ -923,7 +923,9 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
		entrywanted := (directory && files[i].Type == "dir") ||
			(!directory && files[i].Type != "dir")
		if entrywanted {
			files[0], files[i] = files[i], files[0]
			filestamp := files[0]
			files[0] = files[i]
			files[i] = filestamp
		}
	}
	return files, nil
@ -202,14 +202,9 @@ type SharingLinkType struct {
type LinkType string

const (
	// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
	ViewLinkType LinkType = "view"
	// EditLinkType (role: write) An edit sharing link, allowing read-write access.
	EditLinkType LinkType = "edit"
	// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
	// content into a host webpage. Embed links are not available for OneDrive for
	// Business or SharePoint.
	EmbedLinkType LinkType = "embed"
	ViewLinkType  LinkType = "view"  // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
	EditLinkType  LinkType = "edit"  // EditLinkType (role: write) An edit sharing link, allowing read-write access.
	EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
)

// LinkScope represents the scope of the link represented by this permission.

@ -217,12 +212,9 @@ const (
type LinkScope string

const (
	// AnonymousScope = Anyone with the link has access, without needing to sign in.
	// This may include people outside of your organization.
	AnonymousScope LinkScope = "anonymous"
	// OrganizationScope = Anyone signed into your organization (tenant) can use the
	// link to get access. Only available in OneDrive for Business and SharePoint.
	OrganizationScope LinkScope = "organization"
	AnonymousScope    LinkScope = "anonymous"    // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
	OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.

)

// PermissionsType provides information about a sharing permission granted for a DriveItem resource.

@ -244,14 +236,10 @@ type PermissionsType struct {
type Role string

const (
	// ReadRole provides the ability to read the metadata and contents of the item.
	ReadRole Role = "read"
	// WriteRole provides the ability to read and modify the metadata and contents of the item.
	WriteRole Role = "write"
	// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
	OwnerRole Role = "owner"
	// MemberRole represents the member role for SharePoint and OneDrive for Business.
	MemberRole Role = "member"
	ReadRole   Role = "read"   // ReadRole provides the ability to read the metadata and contents of the item.
	WriteRole  Role = "write"  // WriteRole provides the ability to read and modify the metadata and contents of the item.
	OwnerRole  Role = "owner"  // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
	MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
)

// PermissionsResponse is the response to the list permissions method
@ -827,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
			retry = true
			fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
		}
	case 429, 503: // Too Many Requests, Server Too Busy
	case 429: // Too Many Requests.
		// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
		if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
			retryAfter, parseErr := strconv.Atoi(values[0])
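Note: both forms of this case fall through to the same Retry-After handling, which SharePoint documents as a plain number of seconds. A self-contained sketch of that pattern outside rclone (names are illustrative, and the HTTP-date form of Retry-After is deliberately left unhandled):

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryDelay extracts a server-requested backoff from a response.
// ok is false when the header is absent or not a plain number of seconds.
func retryDelay(resp *http.Response) (d time.Duration, ok bool) {
	values := resp.Header["Retry-After"]
	if len(values) != 1 || values[0] == "" {
		return 0, false
	}
	seconds, err := strconv.Atoi(values[0])
	if err != nil || seconds < 0 {
		return 0, false // Retry-After may also be an HTTP date; not handled here
	}
	return time.Duration(seconds) * time.Second, true
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"7"}}}
	d, ok := retryDelay(resp)
	fmt.Println(d, ok) // 7s true: a real client would sleep d before retrying
}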
@ -942,8 +942,7 @@ func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	// Redirects have no body so don't report an error
	if err != nil && resp.Header.Get("Location") == "" {
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.ErrorInfo.Code == "" {

@ -1928,7 +1927,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
		return shareURL, nil
	}

	const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
	cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
	directURL := ""
	segments := strings.Split(shareURL, "/")
	switch f.driveType {
@ -58,10 +58,12 @@ func populateSSECustomerKeys(opt *Options) error {
	sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
	if opt.SSECustomerKeySha256 == "" {
		opt.SSECustomerKeySha256 = sha256Checksum
	} else if opt.SSECustomerKeySha256 != sha256Checksum {
		return fmt.Errorf("the computed SHA256 checksum "+
			"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
			sha256Checksum, opt.SSECustomerKeySha256)
	} else {
		if opt.SSECustomerKeySha256 != sha256Checksum {
			return fmt.Errorf("the computed SHA256 checksum "+
				"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
				sha256Checksum, opt.SSECustomerKeySha256)
		}
	}
	if opt.SSECustomerAlgorithm == "" {
		opt.SSECustomerAlgorithm = sseDefaultAlgorithm
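Note: both variants above enforce the same invariant: the base64-encoded SHA-256 of the decoded customer key must match the configured sse_customer_key_sha256. For reference, a self-contained sketch of computing that checksum from a base64-encoded key (the key below is made up for illustration, and this is not rclone's getSha256 helper):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	// A base64-encoded 256-bit key, as it would appear in the config.
	const sseCustomerKey = "OFqDbgMcPVSJ8rkjb8/5M0sX1SE5vaB4CUHR15pk5c8="

	decoded, err := base64.StdEncoding.DecodeString(sseCustomerKey)
	if err != nil {
		log.Fatal(err)
	}
	sum := sha256.Sum256(decoded)
	// This value is what sse_customer_key_sha256 must be set to.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}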
@ -148,7 +148,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
	}
	md5sumBinary := m.Sum([]byte{})
	w.addMd5(&md5sumBinary, int64(chunkNumber))
	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)
	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

	// Object storage requires 1 <= PartNumber <= 10000
	ossPartNumber := chunkNumber + 1

@ -279,7 +279,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
	if extend := end - int64(len(w.md5s)); extend > 0 {
		w.md5s = append(w.md5s, make([]byte, extend)...)
	}
	copy(w.md5s[start:end], (*md5binary))
	copy(w.md5s[start:end], (*md5binary)[:])
}

func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
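Note: addMd5 above packs each part's 16-byte MD5 into a flat buffer at chunkNumber*md5.Size, growing the slice on demand, so a digest over all parts can later be computed from w.md5s even when parts finish out of order. A compact standalone sketch of that bookkeeping under the same assumptions (fixed 16-byte entries, out-of-order completion):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// partMd5s collects per-part MD5s at fixed offsets so parts can finish
// in any order; index i occupies bytes [i*16, (i+1)*16).
type partMd5s struct{ buf []byte }

func (p *partMd5s) add(sum [md5.Size]byte, chunkNumber int64) {
	start := chunkNumber * md5.Size
	end := start + md5.Size
	if extend := end - int64(len(p.buf)); extend > 0 {
		p.buf = append(p.buf, make([]byte, extend)...)
	}
	copy(p.buf[start:end], sum[:])
}

func main() {
	var p partMd5s
	p.add(md5.Sum([]byte("part two")), 1) // parts may complete out of order
	p.add(md5.Sum([]byte("part one")), 0)
	whole := md5.Sum(p.buf) // digest of the concatenated part digests
	fmt.Println(hex.EncodeToString(whole[:]))
}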
@ -109,37 +109,6 @@ type Hashes struct {
	SHA256 string `json:"sha256"`
}

// FileTruncateResponse is the response from /file_truncate
type FileTruncateResponse struct {
	Error
}

// FileCloseResponse is the response from /file_close
type FileCloseResponse struct {
	Error
}

// FileOpenResponse is the response from /file_open
type FileOpenResponse struct {
	Error
	Fileid         int64 `json:"fileid"`
	FileDescriptor int64 `json:"fd"`
}

// FileChecksumResponse is the response from /file_checksum
type FileChecksumResponse struct {
	Error
	MD5    string `json:"md5"`
	SHA1   string `json:"sha1"`
	SHA256 string `json:"sha256"`
}

// FilePWriteResponse is the response from /file_pwrite
type FilePWriteResponse struct {
	Error
	Bytes int64 `json:"bytes"`
}

// UploadFileResponse is the response from /uploadfile
type UploadFileResponse struct {
	Error
@ -14,7 +14,6 @@ import (
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

@ -147,8 +146,7 @@ we have to rely on user password authentication for it.`,
			Help:       "Your pcloud password.",
			IsPassword: true,
			Advanced:   true,
		},
	}...),
	}}...),
	})
}

@ -163,16 +161,15 @@ type Options struct {

// Fs represents a remote pcloud
type Fs struct {
	name         string                 // name of this remote
	root         string                 // the path we are working on
	opt          Options                // parsed options
	features     *fs.Features           // optional features
	ts           *oauthutil.TokenSource // the token source, used to create new clients
	srv          *rest.Client           // the connection to the server
	cleanupSrv   *rest.Client           // the connection used for the cleanup method
	dirCache     *dircache.DirCache     // Map of directory path to directory id
	pacer        *fs.Pacer              // pacer for API calls
	tokenRenewer *oauthutil.Renew       // renew the token on expiry
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the server
	cleanupSrv   *rest.Client       // the connection used for the cleanup method
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}

// Object describes a pcloud object

@ -320,7 +317,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		name:  name,
		root:  root,
		opt:   *opt,
		ts:    ts,
		srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}

@ -330,7 +326,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	if !canCleanup {
		f.features.CleanUp = nil

@ -338,7 +333,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

@ -380,56 +375,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	return f, nil
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	client, err := f.newSingleConnClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("create client: %w", err)
	}
	// init an empty file
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, fmt.Errorf("resolve src: %w", err)
	}
	openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}

	writer := &writerAt{
		ctx:    ctx,
		client: client,
		fs:     f,
		size:   size,
		remote: remote,
		fd:     openResult.FileDescriptor,
		fileID: openResult.Fileid,
	}

	return writer, nil
}

// Create a new http client, accepting keep-alive headers, limited to single connection.
// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection.
// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
	baseClient := fshttp.NewClient(ctx)
	baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
		t.MaxConnsPerHost = 1
		t.DisableKeepAlives = false
	})
	// Set our own http client in the context
	ctx = oauthutil.Context(ctx, baseClient)
	// create a new oauth client, re-use the token source
	oAuthClient := oauth2.NewClient(ctx, f.ts)
	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
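Note: the comment on newSingleConnClient above is the key constraint here: pcloud file descriptors are bound to one TCP connection, so the client must never spread requests across connections. Stripped of rclone's fshttp/oauthutil helpers, the idea reduces to an http.Transport pinned to one connection per host; a hedged standalone sketch:

package main

import "net/http"

// newSingleConnHTTPClient returns a client that keeps at most one
// connection per host and reuses it for every request, so that
// connection-scoped server state (like pcloud fds) stays valid.
func newSingleConnHTTPClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			MaxConnsPerHost:   1,
			DisableKeepAlives: false, // keep-alive is what preserves the session
		},
	}
}

func main() {
	client := newSingleConnHTTPClient()
	_ = client // issue all fd-scoped requests through this one client
}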
@ -1149,42 +1094,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
	if err != nil {
		return err
	}
	fileID := fileIDtoNumber(o.id)
	filename = o.fs.opt.Enc.FromStandardName(filename)
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/copyfile",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fileid", fileID)
	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("toname", filename)
	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
	opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))

	result := &api.ItemResult{}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("update mtime: copyfile: %w", err)
	}
	if err := o.setMetaData(&result.Metadata); err != nil {
		return err
	}

	return nil
	// Pcloud doesn't have a way of doing this so returning this
	// error will cause the file to be re-uploaded to set the time.
	return fs.ErrorCantSetModTime
}

// Storable returns a boolean showing whether this object is storable
@ -1,216 +0,0 @@
package pcloud

import (
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"net/url"
	"strconv"
	"time"

	"github.com/rclone/rclone/backend/pcloud/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
	ctx    context.Context
	client *rest.Client
	fs     *Fs
	size   int64
	remote string
	fd     int64
	fileID int64
}

// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
	// close fd
	if _, err := c.fileClose(c.ctx); err != nil {
		return fmt.Errorf("close fd: %w", err)
	}

	// Avoiding race conditions: Depending on the tcp connection, there might be
	// caching issues when checking the size immediately after write.
	// Hence we try avoiding them by checking the resulting size on a different connection.
	if c.size < 0 {
		// Without knowing the size, we cannot do size checks.
		// Falling back to a sleep of 1s for sake of hope.
		time.Sleep(1 * time.Second)
		return nil
	}
	sizeOk := false
	sizeLastSeen := int64(0)
	for retry := 0; retry < 5; retry++ {
		fs.Debugf(c.remote, "checking file size: try %d/5", retry)
		obj, err := c.fs.NewObject(c.ctx, c.remote)
		if err != nil {
			return fmt.Errorf("get uploaded obj: %w", err)
		}
		sizeLastSeen = obj.Size()
		if obj.Size() == c.size {
			sizeOk = true
			break
		}
		time.Sleep(1 * time.Second)
	}

	if !sizeOk {
		return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
	}

	return nil
}

// WriteAt implements fs.WriteAt.
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
	contentLength := len(buffer)

	inSHA1Bytes := sha1.Sum(buffer)
	inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

	// get target hash
	outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
	if err != nil {
		return 0, err
	}
	outSHA1 := outChecksum.SHA1

	if outSHA1 == "" || inSHA1 == "" {
		return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1)
	}

	// check hash of buffer, skip if fits
	if inSHA1 == outSHA1 {
		return contentLength, nil
	}

	// upload buffer with offset if necessary
	if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
		return 0, err
	}

	return contentLength, nil
}

// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_open",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	filename = srcFs.opt.Enc.FromStandardName(filename)
	opts.Parameters.Set("name", filename)
	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE

	result := &api.FileOpenResponse{}
	err := srcFs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("open new file descriptor: %w", err)
	}
	return result, nil
}

// Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func (c *writerAt) fileChecksum(
	ctx context.Context,
	offset, count int64,
) (*api.FileChecksumResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_checksum",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
	opts.Parameters.Set("count", strconv.FormatInt(count, 10))

	result := &api.FileChecksumResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
	}
	return result, nil
}

// Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func (c *writerAt) filePWrite(
	ctx context.Context,
	offset int64,
	buf []byte,
) (*api.FilePWriteResponse, error) {
	contentLength := int64(len(buf))
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_pwrite",
		Body:             bytes.NewReader(buf),
		ContentLength:    &contentLength,
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		Close:            false,
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))

	result := &api.FilePWriteResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
	}
	return result, nil
}

// Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_close",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		Close:            true,
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))

	result := &api.FileCloseResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("close file descriptor: %w", err)
	}
	return result, nil
}
@ -513,72 +513,6 @@ type RequestDecompress struct {
	DefaultParent bool   `json:"default_parent,omitempty"`
}

// ------------------------------------------------------------ authorization

// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
	CaptchaToken string `json:"captcha_token"`
	ExpiresIn    int64  `json:"expires_in"` // currently 300s
	// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
	Expiry time.Time `json:"expiry,omitempty"`
	URL    string    `json:"url,omitempty"` // a link for users to solve captcha
}

// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}

	expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
	return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}

// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
	return t != nil && t.CaptchaToken != "" && !t.expired()
}

// CaptchaTokenRequest is to request for captcha token
type CaptchaTokenRequest struct {
	Action       string            `json:"action,omitempty"`
	CaptchaToken string            `json:"captcha_token,omitempty"`
	ClientID     string            `json:"client_id,omitempty"`
	DeviceID     string            `json:"device_id,omitempty"`
	Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
}

// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
	CaptchaSign   string `json:"captcha_sign,omitempty"`
	ClientVersion string `json:"client_version,omitempty"`
	PackageName   string `json:"package_name,omitempty"`
	Timestamp     string `json:"timestamp,omitempty"`
	UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
	UserName      string `json:"username,omitempty"`
	Email         string `json:"email,omitempty"`
	PhoneNumber   string `json:"phone_number,omitempty"`
}

// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
	TokenType    string `json:"token_type"`
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int    `json:"expires_in"`
	Sub          string `json:"sub"`
}

// Expiry returns expiry from expires in, so it should be called on retrieval
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
	if v := e.ExpiresIn; v != 0 {
		return time.Now().Add(time.Duration(v) * time.Second)
	}
	return
}

// ------------------------------------------------------------

// NOT implemented YET
@ -3,10 +3,8 @@ package pikpak
import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/sha1"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"

@ -16,13 +14,10 @@ import (
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/pikpak/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

@ -267,20 +262,15 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
	if err != nil {
		return
	}
	if src.Size() == 0 {
		// If src is zero-length, the API will return
		// Error "cid and file_size is required" (400)
		// In this case, we can simply return cid == gcid
		return cid, nil
	}

	params := url.Values{}
	params.Set("cid", cid)
	params.Set("file_size", strconv.FormatInt(src.Size(), 10))
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/drive/v1/resource/cid",
		Parameters: params,
		Method:       "GET",
		Path:         "/drive/v1/resource/cid",
		Parameters:   params,
		ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
	}

	info := struct {

@ -357,7 +347,7 @@ func calcGcid(r io.Reader, size int64) (string, error) {
	calcBlockSize := func(j int64) int64 {
		var psize int64 = 0x40000
		for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
			psize <<= 1
			psize = psize << 1
		}
		return psize
	}
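Note: calcBlockSize starts at a 256 KiB block (0x40000) and doubles it until the file spans at most 0x200 (512) blocks, capping at 2 MiB (0x200000); the gcid is then a hash computed over per-block hashes. A worked check of just the sizing rule, mirroring the loop above in a standalone program:

package main

import "fmt"

// blockSize mirrors calcBlockSize: 256 KiB doubling up to a 2 MiB cap,
// aiming for at most 512 blocks per file.
func blockSize(fileSize int64) int64 {
	var psize int64 = 0x40000
	for float64(fileSize)/float64(psize) > 0x200 && psize < 0x200000 {
		psize <<= 1
	}
	return psize
}

func main() {
	for _, size := range []int64{1 << 20, 200 << 20, 1 << 30, 8 << 30} {
		fmt.Printf("%5d MiB file -> %4d KiB blocks\n", size>>20, blockSize(size)>>10)
	}
	// 1 MiB -> 256 KiB, 200 MiB -> 512 KiB, 1 GiB -> 2 MiB, 8 GiB -> 2 MiB (cap)
}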
@ -378,23 +368,11 @@ func calcGcid(r io.Reader, size int64) (string, error) {
	return hex.EncodeToString(totalHash.Sum(nil)), nil
}

// unWrapObjectInfo returns the underlying Object unwrapped as much as
// possible or nil even if it is an OverrideRemote
func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
	if o, ok := oi.(fs.Object); ok {
		return fs.UnWrapObject(o)
	} else if do, ok := oi.(*fs.OverrideRemote); ok {
		// Unwrap if it is an operations.OverrideRemote
		return do.UnWrap()
	}
	return nil
}

// calcCid calculates Cid from source
//
// Cid is a simplified version of Gcid
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
	srcObj := unWrapObjectInfo(src)
	srcObj := fs.UnWrapObjectInfo(src)
	if srcObj == nil {
		return "", fmt.Errorf("failed to unwrap object from src: %s", src)
	}

@ -430,8 +408,6 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
	return
}

// ------------------------------------------------------------ authorization

// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation

@ -452,206 +428,3 @@ func genDeviceID() string {
	}
	return string(base)
}

var md5Salt = []string{
	"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
	"+r6CQVxjzJV6LCV",
	"F",
	"pFJRC",
	"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
	"/750aCr4lm/Sly/c",
	"RB+DT/gZCrbV",
	"",
	"CyLsf7hdkIRxRm215hl",
	"7xHvLi2tOYP0Y92b",
	"ZGTXXxu8E/MIWaEDB+Sm/",
	"1UI3",
	"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
	"ihtqpG6FMt65+Xk+tWUH2",
	"NhXXU9rg4XXdzo7u5o",
}

func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

func calcCaptchaSign(deviceID string) (timestamp, sign string) {
	timestamp = fmt.Sprint(time.Now().UnixMilli())
	str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
	for _, salt := range md5Salt {
		str = md5Sum(str + salt)
	}
	sign = "1." + str
	return
}

func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
	req = &api.CaptchaTokenRequest{
		Action:       action,
		CaptchaToken: oldToken, // can be empty initially
		ClientID:     clientID,
		DeviceID:     opt.DeviceID,
		Meta:         new(api.CaptchaTokenMeta),
	}
	switch action {
	case "POST:/v1/auth/signin":
		req.Meta.UserName = opt.Username
	default:
		timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
		req.Meta.CaptchaSign = captchaSign
		req.Meta.Timestamp = timestamp
		req.Meta.ClientVersion = clientVersion
		req.Meta.PackageName = packageName
		req.Meta.UserID = opt.UserID
	}
	return
}

// CaptchaTokenSource stores updated captcha tokens in the config file
type CaptchaTokenSource struct {
	mu    sync.Mutex
	m     configmap.Mapper
	opt   *Options
	token *api.CaptchaToken
	ctx   context.Context
	rst   *pikpakClient
}

// initialize CaptchaTokenSource from rclone.conf if possible
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
	token := new(api.CaptchaToken)
	tokenString, ok := m.Get("captcha_token")
	if !ok || tokenString == "" {
		fs.Debugf(nil, "failed to read captcha token out of config file")
	} else {
		if err := json.Unmarshal([]byte(tokenString), token); err != nil {
			fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
		}
	}
	return &CaptchaTokenSource{
		m:     m,
		opt:   opt,
		token: token,
		ctx:   ctx,
		rst:   newPikpakClient(getClient(ctx, opt), opt),
	}
}

// requestToken retrieves captcha token from API
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
	opts := rest.Opts{
		Method:  "POST",
		RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
	}
	var info *api.CaptchaToken
	_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
	if err == nil && info.ExpiresIn != 0 {
		// populate to Expiry
		info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
		cts.token = info // update with a new one
	}
	return
}

func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
	oldToken := ""
	if cts.token != nil {
		oldToken = cts.token.CaptchaToken
	}
	action := "GET:/drive/v1/about"
	if opts.RootURL == "" && opts.Path != "" {
		action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
	} else if u, err := url.Parse(opts.RootURL); err == nil {
		action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
	}
	req := newCaptchaTokenRequest(action, oldToken, cts.opt)
	if err := cts.requestToken(cts.ctx, req); err != nil {
		return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
	}

	// put it into rclone.conf
	tokenBytes, err := json.Marshal(cts.token)
	if err != nil {
		return "", fmt.Errorf("failed to marshal captcha token: %w", err)
	}
	cts.m.Set("captcha_token", string(tokenBytes))
	return cts.token.CaptchaToken, nil
}

// Invalidate resets existing captcha token for a forced refresh
func (cts *CaptchaTokenSource) Invalidate() {
	cts.mu.Lock()
	cts.token.CaptchaToken = ""
	cts.mu.Unlock()
}

// Token returns a valid captcha token
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
	cts.mu.Lock()
	defer cts.mu.Unlock()
	if cts.token.Valid() {
		return cts.token.CaptchaToken, nil
	}
	return cts.refreshToken(opts)
}

// pikpakClient wraps rest.Client with a handle of captcha token
type pikpakClient struct {
	opt     *Options
	client  *rest.Client
	captcha *CaptchaTokenSource
}

// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
// * error handler
// * root url
// * default headers
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
	client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
	for key, val := range map[string]string{
		"Referer":          "https://mypikpak.com/",
		"x-client-id":      clientID,
		"x-client-version": clientVersion,
		"x-device-id":      opt.DeviceID,
		// "x-device-model":  "firefox%2F129.0",
		// "x-device-name":   "PC-Firefox",
		// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
		// "x-net-work-type":    "NONE",
		// "x-os-version":       "Win32",
		// "x-platform-version": "1",
		// "x-protocol-version": "301",
		// "x-provider-name":    "NONE",
		// "x-sdk-version":      "8.0.3",
	} {
		client.SetHeader(key, val)
	}
	return &pikpakClient{
		client: client,
		opt:    opt,
	}
}

// This should be called right after pikpakClient initialized
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
	c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
	return c
}

func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	if c.captcha != nil {
		token, err := c.captcha.Token(opts)
		if err != nil || token == "" {
			return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
		}
		if opts.ExtraHeaders == nil {
			opts.ExtraHeaders = make(map[string]string)
		}
		opts.ExtraHeaders["x-captcha-token"] = token
	}
	return c.client.CallJSON(ctx, opts, request, response)
}

func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
	return c.client.Call(ctx, opts)
}
@ -23,7 +23,6 @@ package pikpak
import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"

@ -38,11 +37,10 @@ import (
	"sync"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/rclone/rclone/backend/pikpak/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"

@ -52,7 +50,6 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/dircache"

@ -66,17 +63,15 @@ import (

// Constants
const (
	clientID                    = "YUMx5nI8ZU8Ap8pm"
	clientVersion               = "2.0.0"
	packageName                 = "mypikpak.com"
	defaultUserAgent            = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
	minSleep                    = 100 * time.Millisecond
	maxSleep                    = 2 * time.Second
	taskWaitTime                = 500 * time.Millisecond
	decayConstant               = 2 // bigger for slower decay, exponential
	rootURL                     = "https://api-drive.mypikpak.com"
	minChunkSize                = fs.SizeSuffix(manager.MinUploadPartSize)
	defaultUploadConcurrency    = manager.DefaultUploadConcurrency
	rcloneClientID              = "YNxT9w7GMdWvEOKa"
	rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
	minSleep                    = 100 * time.Millisecond
	maxSleep                    = 2 * time.Second
	taskWaitTime                = 500 * time.Millisecond
	decayConstant               = 2 // bigger for slower decay, exponential
	rootURL                     = "https://api-drive.mypikpak.com"
	minChunkSize                = fs.SizeSuffix(s3manager.MinUploadPartSize)
	defaultUploadConcurrency    = s3manager.DefaultUploadConcurrency
)

// Globals

@ -89,53 +84,43 @@ var (
		TokenURL:  "https://user.mypikpak.com/v1/auth/token",
		AuthStyle: oauth2.AuthStyleInParams,
	},
	ClientID:    clientID,
	RedirectURL: oauthutil.RedirectURL,
	ClientID:     rcloneClientID,
	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
	RedirectURL:  oauthutil.RedirectURL,
}
)

// Returns OAuthOptions modified for pikpak
func pikpakOAuthOptions() []fs.Option {
	opts := []fs.Option{}
	for _, opt := range oauthutil.SharedOptions {
		if opt.Name == config.ConfigClientID {
			opt.Advanced = true
		} else if opt.Name == config.ConfigClientSecret {
			opt.Advanced = true
		}
		opts = append(opts, opt)
	}
	return opts
}

// pikpakAuthorize retrieves an OAuth token using user/pass and saves it to rclone.conf
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
	if opt.Username == "" {
		return errors.New("no username")
	// override default client id/secret
	if id, ok := m.Get("client_id"); ok && id != "" {
		oauthConfig.ClientID = id
	}
	if secret, ok := m.Get("client_secret"); ok && secret != "" {
		oauthConfig.ClientSecret = secret
	}
	pass, err := obscure.Reveal(opt.Password)
	if err != nil {
		return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
	}
	// new device id if necessary
	if len(opt.DeviceID) != 32 {
		opt.DeviceID = genDeviceID()
		m.Set("device_id", opt.DeviceID)
		fs.Infof(nil, "Using new device id %q", opt.DeviceID)
	}
	opts := rest.Opts{
		Method:  "POST",
		RootURL: "https://user.mypikpak.com/v1/auth/signin",
	}
	req := map[string]string{
		"username":  opt.Username,
		"password":  pass,
		"client_id": clientID,
	}
	var token api.Token
	rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
	_, err = rst.CallJSON(ctx, &opts, req, &token)
	if apiErr, ok := err.(*api.Error); ok {
		if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
			rst.captcha.Invalidate()
			_, err = rst.CallJSON(ctx, &opts, req, &token)
		}
	}
	t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
	if err != nil {
		return fmt.Errorf("failed to retrieve token using username/password: %w", err)
	}
	t := &oauth2.Token{
		AccessToken:  token.AccessToken,
		TokenType:    token.TokenType,
		RefreshToken: token.RefreshToken,
		Expiry:       token.Expiry(),
	}
	return oauthutil.PutToken(name, m, t, false)
}
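Note: the replacement authorize path above leans on oauth2's resource-owner password flow: PasswordCredentialsToken exchanges username/password directly for a token at the configured endpoint, instead of hand-rolling the signin POST and converting the response. A hedged sketch of that call in isolation (endpoint and credentials below are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "example-client-id", // placeholder
		ClientSecret: "example-secret",    // placeholder
		Endpoint: oauth2.Endpoint{
			TokenURL:  "https://auth.example.com/v1/auth/token", // placeholder
			AuthStyle: oauth2.AuthStyleInParams,
		},
	}
	// Resource-owner password credentials grant: only sensible when the
	// app is trusted with the user's password, as a CLI tool is.
	tok, err := conf.PasswordCredentialsToken(context.Background(), "user", "pass")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.TokenType, tok.Expiry)
}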
@ -174,7 +159,7 @@ func init() {
			}
			return nil, fmt.Errorf("unknown state %q", config.State)
		},
		Options: []fs.Option{{
		Options: append(pikpakOAuthOptions(), []fs.Option{{
			Name:     "user",
			Help:     "Pikpak username.",
			Required: true,

@ -184,18 +169,6 @@ func init() {
			Help:       "Pikpak password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name:      "device_id",
			Help:      "Device ID used for authorization.",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     "user_agent",
			Default:  defaultUserAgent,
			Advanced: true,
			Help: fmt.Sprintf(`HTTP user agent for pikpak.

Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder.

@ -274,7 +247,7 @@ this may help to speed up the transfers.`,
				encoder.EncodeRightSpace |
				encoder.EncodeRightPeriod |
				encoder.EncodeInvalidUtf8),
		}},
		}}...),
	})
}

@ -282,9 +255,6 @@ this may help to speed up the transfers.`,
type Options struct {
	Username     string `config:"user"`
	Password     string `config:"pass"`
	UserID       string `config:"user_id"` // only available during runtime
	DeviceID     string `config:"device_id"`
	UserAgent    string `config:"user_agent"`
	RootFolderID string `config:"root_folder_id"`
	UseTrash     bool   `config:"use_trash"`
	TrashedOnly  bool   `config:"trashed_only"`

@ -300,10 +270,11 @@ type Fs struct {
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	rst          *pikpakClient      // the connection to the server
	rst          *rest.Client       // the connection to the server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	rootFolderID string             // the id of the root folder
	deviceID     string             // device id used for api requests
	client       *http.Client       // authorized client
	m            configmap.Mapper
	tokenMu      *sync.Mutex // when renewing tokens

@ -457,12 +428,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
		} else if apiErr.Reason == "file_space_not_enough" {
			// "file_space_not_enough" (8): Storage space is not enough
			return false, fserrors.FatalError(err)
		} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
			// "captcha_invalid" (9): Verification code is invalid
			// This error occurred on the POST:/drive/v1/files endpoint
			// when a zero-byte file was uploaded with an invalid captcha token
			f.rst.captcha.Invalidate()
			return true, err
		}
	}

@ -486,36 +451,13 @@ func errorHandler(resp *http.Response) error {
	return errResponse
}

// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
	// Override few config settings and create a client
	newCtx, ci := fs.AddConfig(ctx)
	ci.UserAgent = opt.UserAgent
	return fshttp.NewClient(newCtx)
}

// newClientWithPacer sets a new http/rest client with a pacer to Fs
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
	var ts *oauthutil.TokenSource
	f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
	f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
	if err != nil {
		return fmt.Errorf("failed to create oauth client: %w", err)
	}
	token, err := ts.Token()
	if err != nil {
		return err
	}
	// parse user_id from oauth access token for later use
	if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
		jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
		info := struct {
			UserID string `json:"sub,omitempty"`
		}{}
		if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
			f.opt.UserID = info.UserID
		}
	}
	f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
	f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
	return nil
}
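Note: one detail in the user_id parsing above is worth spelling out: the JWT payload segment is base64url without padding, and appending "===" deliberately over-pads it; Go's DecodeString then returns the bytes it decoded successfully together with an error, which the code discards with the blank identifier. A hedged standalone sketch of pulling the "sub" claim out of a token the same way (the token below is constructed for illustration and is unsigned):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// subClaim extracts the "sub" claim from a JWT without verifying it.
// Appending "===" over-pads the base64url payload; DecodeString returns
// the valid prefix along with an error, which is intentionally ignored.
func subClaim(jwt string) string {
	parts := strings.Split(jwt, ".")
	if len(parts) < 2 {
		return ""
	}
	payload, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
	var claims struct {
		Sub string `json:"sub"`
	}
	_ = json.Unmarshal(payload, &claims)
	return claims.Sub
}

func main() {
	token := "eyJhbGciOiJub25lIn0." +
		base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"12345"}`)) +
		"."
	fmt.Println(subClaim(token)) // 12345
}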
@ -548,19 +490,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
|||
CanHaveEmptyDirectories: true, // can have empty directories
|
||||
NoMultiThreading: true, // can't have multiple threads downloading
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// new device id if necessary
|
||||
if len(f.opt.DeviceID) != 32 {
|
||||
f.opt.DeviceID = genDeviceID()
|
||||
m.Set("device_id", f.opt.DeviceID)
|
||||
fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
|
||||
}
|
||||
f.deviceID = genDeviceID()
|
||||
|
||||
if err := f.newClientWithPacer(ctx); err != nil {
|
||||
// re-authorize if necessary
|
||||
if strings.Contains(err.Error(), "invalid_grant") {
|
||||
return f, f.reAuthorize(ctx)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -1258,33 +1190,32 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
|
|||
|
||||
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
|
||||
p := resumable.Params
|
||||
endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"
|
||||
|
||||
// Create a credentials provider
|
||||
creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)
|
||||
|
||||
cfg, err := awsconfig.LoadDefaultConfig(ctx,
|
||||
awsconfig.WithCredentialsProvider(creds),
|
||||
awsconfig.WithRegion("pikpak"))
|
||||
cfg := &aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
|
||||
Region: aws.String("pikpak"),
|
||||
Endpoint: &endpoint,
|
||||
}
|
||||
sess, err := session.NewSession(cfg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
partSize := chunksize.Calculator(name, size, s3manager.MaxUploadParts, f.opt.ChunkSize)
|
||||
|
||||
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||
o.BaseEndpoint = aws.String("https://mypikpak.com/")
|
||||
})
|
||||
partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)
|
||||
|
||||
// Create an uploader with custom options
|
||||
uploader := manager.NewUploader(client, func(u *manager.Uploader) {
|
||||
// Create an uploader with the session and custom options
|
||||
uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
|
||||
u.PartSize = int64(partSize)
|
||||
u.Concurrency = f.opt.UploadConcurrency
|
||||
})
|
||||
// Perform an upload
|
||||
_, err = uploader.Upload(ctx, &s3.PutObjectInput{
|
||||
// Upload input parameters
|
||||
uParams := &s3manager.UploadInput{
|
||||
Bucket: &p.Bucket,
|
||||
Key: &p.Key,
|
||||
Body: in,
|
||||
})
|
||||
}
|
||||
// Perform an upload
|
||||
_, err = uploader.UploadWithContext(ctx, uParams)
|
||||
return
|
||||
}
|
||||
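The hunk above shows the AWS SDK v2 upload path and the older aws-sdk-go v1 s3manager flow side by side. A minimal, self-contained sketch of the v1 flow, assuming illustrative function name, part size, and concurrency (none of these are part of the backend):

package main

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadV1 mirrors the pattern above: static credentials -> session ->
// s3manager.Uploader -> UploadWithContext.
func uploadV1(ctx context.Context, body io.Reader, endpoint, bucket, key, id, secret, token string) error {
	cfg := &aws.Config{
		Credentials: credentials.NewStaticCredentials(id, secret, token),
		Region:      aws.String("pikpak"),
		Endpoint:    &endpoint,
	}
	sess, err := session.NewSession(cfg)
	if err != nil {
		return err
	}
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 10 << 20 // illustrative; rclone derives this with chunksize.Calculator
		u.Concurrency = 4     // illustrative; rclone uses f.opt.UploadConcurrency
	})
	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
		Bucket: &bucket,
		Key:    &key,
		Body:   body,
	})
	return err
}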
@@ -1774,7 +1705,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
	gcid, err := o.fs.getGcid(ctx, src)
	if err != nil || gcid == "" {
		fs.Debugf(o, "calculating gcid: %v", err)
		if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
		if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
			// No buffering; directly calculate gcid from source
			rc, err := srcObj.Open(ctx)
			if err != nil {
@@ -1,397 +0,0 @@
package pixeldrain

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
	Path      []FilesystemNode `json:"path"`
	BaseIndex int              `json:"base_index"`
	Children  []FilesystemNode `json:"children"`
}

// Base returns the base node of the path, this is the node that the path points
// to
func (fsp *FilesystemPath) Base() FilesystemNode {
	return fsp.Path[fsp.BaseIndex]
}

// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as response from update
// commands, if requested
type FilesystemNode struct {
	Type      string    `json:"type"`
	Path      string    `json:"path"`
	Name      string    `json:"name"`
	Created   time.Time `json:"created"`
	Modified  time.Time `json:"modified"`
	ModeOctal string    `json:"mode_octal"`

	// File params
	FileSize  int64  `json:"file_size"`
	FileType  string `json:"file_type"`
	SHA256Sum string `json:"sha256_sum"`

	// ID is only filled in when the file/directory is publicly shared
	ID string `json:"id,omitempty"`
}

// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry

// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory, and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
	Time    time.Time `json:"time"`
	Path    string    `json:"path"`
	PathNew string    `json:"path_new"`
	Action  string    `json:"action"`
	Type    string    `json:"type"`
}

// UserInfo contains information about the logged in user
type UserInfo struct {
	Username         string           `json:"username"`
	Subscription     SubscriptionType `json:"subscription"`
	StorageSpaceUsed int64            `json:"storage_space_used"`
}

// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription. Like the
// perks and cost
type SubscriptionType struct {
	Name         string `json:"name"`
	StorageSpace int64  `json:"storage_space"`
}

// APIError is the error type returned by the pixeldrain API
type APIError struct {
	StatusCode string `json:"value"`
	Message    string `json:"message"`
}

func (e APIError) Error() string { return e.StatusCode }

// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
	errNotFound             = errors.New("pd api: path not found")
	errExists               = errors.New("pd api: node already exists")
	errAuthenticationFailed = errors.New("pd api: authentication failed")
)

func apiErrorHandler(resp *http.Response) (err error) {
	var e APIError
	if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
		return fmt.Errorf("failed to parse error json: %w", err)
	}

	// We close the body here so that the API handlers can be sure that the
	// response body is not still open when an error was returned
	if err = resp.Body.Close(); err != nil {
		return fmt.Errorf("failed to close resp body: %w", err)
	}

	if e.StatusCode == "path_not_found" {
		return errNotFound
	} else if e.StatusCode == "directory_not_empty" {
		return fs.ErrorDirectoryNotEmpty
	} else if e.StatusCode == "node_already_exists" {
		return errExists
	} else if e.StatusCode == "authentication_failed" {
		return errAuthenticationFailed
	} else if e.StatusCode == "permission_denied" {
		return fs.ErrorPermissionDenied
	}

	return e
}

var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
}

// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
	params = make(url.Values)

	if modified, ok := meta["mtime"]; ok {
		params.Set("modified", modified)
	}
	if created, ok := meta["btime"]; ok {
		params.Set("created", created)
	}
	if mode, ok := meta["mode"]; ok {
		params.Set("mode", mode)
	}
	if shared, ok := meta["shared"]; ok {
		params.Set("shared", shared)
	}
	if loggingEnabled, ok := meta["logging_enabled"]; ok {
		params.Set("logging_enabled", loggingEnabled)
	}

	return params
}

// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
	// Trim the path prefix. The path prefix is hidden from rclone during all
	// operations. Saving it here would confuse rclone a lot. So instead we
	// strip it here and add it back for every API request we need to perform
	node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
	return &Object{fs: f, base: node}
}

func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
	return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}

func (f *Fs) escapePath(p string) (out string) {
	// Add the path prefix, encode all the parts and combine them together
	var parts = strings.Split(f.pathPrefix+p, "/")
	for i := range parts {
		parts[i] = url.PathEscape(parts[i])
	}
	return strings.Join(parts, "/")
}

func (f *Fs) put(
	ctx context.Context,
	path string,
	body io.Reader,
	meta fs.Metadata,
	options []fs.OpenOption,
) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(meta)

	// Tell the server to automatically create parent directories if they don't
	// exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "PUT",
				Path:       f.escapePath(path),
				Body:       body,
				Parameters: params,
				Options:    options,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &rest.Opts{
			Method:  "GET",
			Path:    f.escapePath(path),
			Options: options,
		})
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
	return fsp, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(path),
				// To receive node info from the pixeldrain API you need to add the
				// ?stat query. Without it pixeldrain will return the file contents
				// if the URL points to a file
				Parameters: url.Values{"stat": []string{""}},
			},
			nil,
			&fsp,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
	return changeLog, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(""),
				Parameters: url.Values{
					"change_log": []string{""},
					"start":      []string{start.Format(time.RFC3339Nano)},
					"end":        []string{end.Format(time.RFC3339Nano)},
				},
			},
			nil,
			&changeLog,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(fields)
	params.Set("action", "update")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(path),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(dir),
				MultipartParams: url.Values{"action": []string{"mkdirall"}},
				NoResponse:      true,
			},
			nil,
			nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")

// Renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		// This is not a pixeldrain FS, can't move
		return node, errIncompatibleSourceFS
	} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
		// Path is not in the same root dir, can't move
		return node, errIncompatibleSourceFS
	}

	var params = paramsFromMetadata(meta)
	params.Set("action", "rename")

	// The target is always in our own filesystem so here we use our
	// own pathPrefix
	params.Set("target", f.pathPrefix+to)

	// Create parent directories if the parent directory of the file
	// does not exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "POST",
				// Important: We use the source FS path prefix here
				Path:            srcFs.escapePath(from),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
	var params url.Values
	if recursive {
		// Tell the server to recursively delete all child files
		params = url.Values{"recursive": []string{"true"}}
	}

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "DELETE",
				Path:       f.escapePath(path),
				Parameters: params,
				NoResponse: true,
			},
			nil, nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
	return user, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				// The default RootURL points at the filesystem endpoint. We can't
				// use that to request user information. So here we override it to
				// the user endpoint
				RootURL: f.opt.APIURL + "/user",
			},
			nil,
			&user,
		)
		return shouldRetry(ctx, resp, err)
	})
}
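Every helper in the file above follows the same retry protocol: wrap the REST call in pacer.Call and let shouldRetry decide, from the error and HTTP status, whether the pacer should run the closure again. A minimal sketch with a hypothetical helper (ping is not part of the backend; f.srv, f.pacer, and shouldRetry are as defined above):

func (f *Fs) ping(ctx context.Context) error {
	return f.pacer.Call(func() (bool, error) {
		// Any transport error or retryable HTTP status makes the pacer
		// back off and call this closure again
		resp, err := f.srv.Call(ctx, &rest.Opts{Method: "GET", Path: "/"})
		return shouldRetry(ctx, resp, err)
	})
}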
@@ -1,567 +0,0 @@
// Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	timeFormat    = time.RFC3339Nano
	minSleep      = pacer.MinSleep(10 * time.Millisecond)
	maxSleep      = pacer.MaxSleep(1 * time.Second)
	decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pixeldrain",
		Description: "Pixeldrain Filesystem",
		NewFs:       NewFs,
		Config:      nil,
		Options: []fs.Option{{
			Name: "api_key",
			Help: "API key for your pixeldrain account.\n" +
				"Found on https://pixeldrain.com/user/api_keys.",
			Sensitive: true,
		}, {
			Name: "root_folder_id",
			Help: "Root of the filesystem to use.\n\n" +
				"Set to 'me' to use your personal filesystem. " +
				"Set to a shared directory ID to use a shared directory.",
			Default: "me",
		}, {
			Name: "api_url",
			Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
				"this at default. It is only intended to be changed for testing purposes.",
			Default:  "https://pixeldrain.com/api",
			Advanced: true,
			Required: true,
		}},
		MetadataInfo: &fs.MetadataInfo{
			System: map[string]fs.MetadataHelp{
				"mode": {
					Help:    "File mode",
					Type:    "octal, unix style",
					Example: "755",
				},
				"mtime": {
					Help:    "Time of last modification",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
				"btime": {
					Help:    "Time of file birth (creation)",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
			},
			Help: "Pixeldrain supports file modes and creation times.",
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`
	RootFolderID string `config:"root_folder_id"`
	APIURL       string `config:"api_url"`
}

// Fs represents a remote pixeldrain filesystem
type Fs struct {
	name     string       // name of this remote, as given to NewFs
	root     string       // the path we are working on, as given to NewFs
	opt      Options      // parsed options
	features *fs.Features // optional features
	srv      *rest.Client // the connection to the server
	pacer    *fs.Pacer
	loggedIn bool // if the user is authenticated

	// pathPrefix is the directory we're working in. The pathPrefix is stripped
	// from every API response containing a path. The pathPrefix always begins
	// and ends with a slash for concatenation convenience
	pathPrefix string
}

// Object describes a pixeldrain file
type Object struct {
	fs   *Fs            // what this object is part of
	base FilesystemNode // the node this object references
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
	}
	f.features = (&fs.Features{
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ReadMetadata:            true,
		WriteMetadata:           true,
	}).Fill(ctx, f)

	// Set the path prefix. This is the path to the root directory on the
	// server. We add it to each request and strip it from each response because
	// rclone does not want to see it
	f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"

	// The root URL equates to https://pixeldrain.com/api/filesystem during
	// normal operation. API handlers need to manually add the pathPrefix to
	// each request
	f.srv.SetRoot(opt.APIURL + "/filesystem")

	// If using an APIKey, set the Authorization header
	if len(opt.APIKey) > 0 {
		f.srv.SetUserPass("", opt.APIKey)

		// Check if credentials are correct
		user, err := f.userInfo(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get user data: %w", err)
		}

		f.loggedIn = true

		fs.Infof(f,
			"Logged in as '%s', subscription '%s', storage limit %d",
			user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
		)
	}

	if !f.loggedIn && opt.RootFolderID == "me" {
		return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
	}

	// Satisfy TestFsIsFile. This test expects that we throw an error if the
	// filesystem root is a file
	fsp, err := f.stat(ctx, "")
	if err != errNotFound && err != nil {
		// It doesn't matter if the root directory does not exist, as long as it
		// is not a file. This is what the test dictates
		return f, err
	} else if err == nil && fsp.Base().Type == "file" {
		// The filesystem root is a file, rclone wants us to set the root to the
		// parent directory
		f.root = path.Dir(f.root)
		f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fsp, err := f.stat(ctx, dir)
	if err == errNotFound {
		return nil, fs.ErrorDirNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "file" {
		return nil, fs.ErrorIsFile
	}

	entries = make(fs.DirEntries, len(fsp.Children))
	for i := range fsp.Children {
		if fsp.Children[i].Type == "dir" {
			entries[i] = f.nodeToDirectory(fsp.Children[i])
		} else {
			entries[i] = f.nodeToObject(fsp.Children[i])
		}
	}

	return entries, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	fsp, err := f.stat(ctx, remote)
	if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "dir" {
		return nil, fs.ErrorIsDir
	}
	return f.nodeToObject(fsp.Base()), nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to get object metadata: %w", err)
	}

	// Set the mtime if it was not already set in the metadata
	if _, ok := meta["mtime"]; !ok {
		if meta == nil {
			meta = make(fs.Metadata)
		}
		meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
	}

	node, err := f.put(ctx, src.Remote(), in, meta, options)
	if err != nil {
		return nil, fmt.Errorf("failed to put object: %w", err)
	}

	return f.nodeToObject(node), nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	err = f.mkdir(ctx, dir)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		// Spec says we do not return an error if the directory already exists
		return nil
	}
	return err
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, false)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, true)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		// This is not a pixeldrain object. Can't move
		return nil, fs.ErrorCantMove
	}

	node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
	if err == errIncompatibleSourceFS {
		return nil, fs.ErrorCantMove
	} else if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	}

	return f.nodeToObject(node), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
	if err == errIncompatibleSourceFS {
		return fs.ErrorCantDirMove
	} else if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		return fs.ErrorDirExists
	}
	return err
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	// If the root folder is not /me we need to explicitly enable change logging
	// for this directory or file
	if f.pathPrefix != "/me/" {
		_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
		if err != nil {
			fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
		}
	}

	go f.changeNotify(ctx, notify, newInterval)
}

func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	var ticker = time.NewTicker(<-newInterval)
	var lastPoll = time.Now()

	for {
		select {
		case dur, ok := <-newInterval:
			if !ok {
				ticker.Stop()
				return
			}

			fs.Debugf(f, "Polling changes at an interval of %s", dur)
			ticker.Reset(dur)

		case t := <-ticker.C:
			clog, err := f.changeLog(ctx, lastPoll, t)
			if err != nil {
				fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
				continue
			}

			for i := range clog {
				fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
					clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)

				if clog[i].Type == "dir" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
				} else if clog[i].Type == "file" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
				}
			}

			lastPoll = t
		}
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put already supports streaming so we just use that
	return f.Put(ctx, in, src, options...)
}

// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
	_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	return err
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
	if err != nil {
		return "", err
	}
	if fsn.ID != "" {
		return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
	}
	return "", nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	user, err := f.userInfo(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}

	usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}

	if user.Subscription.StorageSpace > -1 {
		usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
	}

	return usage, nil
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	if err == nil {
		o.base.Modified = modTime
	}
	return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	return o.fs.read(ctx, o.base.Path, options)
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// Copy the parameters and update the object
	o.base.Modified = src.ModTime(ctx)
	o.base.FileSize = src.Size()
	o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
	_, err = o.fs.Put(ctx, in, o, options...)
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.delete(ctx, o.base.Path, false)
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	return o.base.SHA256Sum, nil
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.base.Path
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.base.Path
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.base.Modified
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.base.FileSize
}

// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
	return o.base.FileType
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return fs.Metadata{
		"mode":  o.base.ModeOctal,
		"mtime": o.base.Modified.Format(timeFormat),
		"btime": o.base.Created.Format(timeFormat),
	}, nil
}

// Verify that all the interfaces are implemented correctly
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Info           = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.PutStreamer    = (*Fs)(nil)
	_ fs.DirSetModTimer = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
	_ fs.DirEntry       = (*Object)(nil)
	_ fs.MimeTyper      = (*Object)(nil)
	_ fs.Metadataer     = (*Object)(nil)
)
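The changeNotify loop above implements rclone's ChangeNotify contract: the first receive on the interval channel starts the ticker, later sends adjust it, and closing the channel stops polling. A hypothetical caller-side sketch (watchChanges is illustrative, not part of rclone):

func watchChanges(ctx context.Context, f fs.Fs) {
	do := f.Features().ChangeNotify
	if do == nil {
		return // backend does not support ChangeNotify
	}
	interval := make(chan time.Duration)
	do(ctx, func(path string, entryType fs.EntryType) {
		fs.Infof(nil, "changed: %s (%v)", path, entryType)
	}, interval)
	interval <- 5 * time.Second  // start polling every five seconds
	time.Sleep(30 * time.Second) // observe changes for a while
	close(interval)              // stop polling and release the ticker
}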
@@ -1,18 +0,0 @@
// Test pixeldrain filesystem interface
package pixeldrain_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pixeldrain"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestPixeldrain:",
		NilObject:       (*pixeldrain.Object)(nil),
		SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
	})
}
@@ -13,8 +13,7 @@ import (
	"reflect"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/aws-sdk-go/service/s3"
)

// flags

@@ -83,18 +82,15 @@ func main() {

package s3

import (
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
import "github.com/aws/aws-sdk-go/service/s3"
`)

	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
	genSetFrom(new(types.Object), new(types.ObjectVersion))
	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
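The generator above produces the setFrom_* helpers shown further below by reflecting over both struct types and emitting an assignment for every field the two have in common. A minimal sketch of that technique (fieldsInCommon is a hypothetical stand-in for the generator's core loop, not rclone's actual code):

import "reflect"

// fieldsInCommon lists the fields present in both *a and *b with identical
// types, i.e. the fields for which a "dst.F = src.F" line would be generated.
func fieldsInCommon(a, b interface{}) (common []string) {
	ta := reflect.TypeOf(a).Elem()
	tb := reflect.TypeOf(b).Elem()
	for i := 0; i < ta.NumField(); i++ {
		f := ta.Field(i)
		if g, ok := tb.FieldByName(f.Name); ok && g.Type == f.Type {
			common = append(common, f.Name)
		}
	}
	return common
}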
backend/s3/s3.go (1068 changed lines): file diff suppressed because it is too large.
@@ -5,17 +5,15 @@ import (
	"compress/gzip"
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"path"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/hash"

@@ -60,16 +58,6 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
	// "tier" - read only
	// "btime" - read only
	}
	// Cloudflare insists on decompressing `Content-Encoding: gzip` unless
	// `Cache-Control: no-transform` is supplied. This is a deviation from
	// AWS but we fudge the tests here rather than breaking people's
	// expectations of what Cloudflare does.
	//
	// This can always be overridden by using
	// `--header-upload "Cache-Control: no-transform"`
	if f.opt.Provider == "Cloudflare" {
		metadata["cache-control"] = "no-transform"
	}
	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata)
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
@@ -143,20 +131,20 @@ func TestVersionLess(t *testing.T) {
	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
	for n, test := range []struct {
		a, b *types.ObjectVersion
		a, b *s3.ObjectVersion
		want bool
	}{
		{a: nil, b: nil, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
		{a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
		{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
	} {
		got := versionLess(test.a, test.b)
		assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))

@@ -169,24 +157,24 @@ func TestMergeDeleteMarkers(t *testing.T) {
	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
	for n, test := range []struct {
		versions []types.ObjectVersion
		markers  []types.DeleteMarkerEntry
		want     []types.ObjectVersion
		versions []*s3.ObjectVersion
		markers  []*s3.DeleteMarkerEntry
		want     []*s3.ObjectVersion
	}{
		{
			versions: []types.ObjectVersion{},
			markers:  []types.DeleteMarkerEntry{},
			want:     []types.ObjectVersion{},
			versions: []*s3.ObjectVersion{},
			markers:  []*s3.DeleteMarkerEntry{},
			want:     []*s3.ObjectVersion{},
		},
		{
			versions: []types.ObjectVersion{
			versions: []*s3.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			markers: []types.DeleteMarkerEntry{},
			want: []types.ObjectVersion{
			markers: []*s3.DeleteMarkerEntry{},
			want: []*s3.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,

@@ -194,14 +182,14 @@ func TestMergeDeleteMarkers(t *testing.T) {
			},
		},
		{
			versions: []types.ObjectVersion{},
			markers: []types.DeleteMarkerEntry{
			versions: []*s3.ObjectVersion{},
			markers: []*s3.DeleteMarkerEntry{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			want: []types.ObjectVersion{
			want: []*s3.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t1,

@@ -210,7 +198,7 @@ func TestMergeDeleteMarkers(t *testing.T) {
			},
		},
		{
			versions: []types.ObjectVersion{
			versions: []*s3.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t2,

@@ -220,13 +208,13 @@ func TestMergeDeleteMarkers(t *testing.T) {
					LastModified: &t2,
				},
			},
			markers: []types.DeleteMarkerEntry{
			markers: []*s3.DeleteMarkerEntry{
				{
					Key:          &key1,
					LastModified: &t1,
				},
			},
			want: []types.ObjectVersion{
			want: []*s3.ObjectVersion{
				{
					Key:          &key1,
					LastModified: &t2,

@@ -411,23 +399,22 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
	// quirk is set correctly
	req := s3.CreateBucketInput{
		Bucket: &f.rootBucket,
		ACL:    types.BucketCannedACL(f.opt.BucketACL),
		ACL:    stringPointerOrNil(f.opt.BucketACL),
	}
	if f.opt.LocationConstraint != "" {
		req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint),
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: &f.opt.LocationConstraint,
		}
	}
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.c.CreateBucket(ctx, &req)
		_, err := f.c.CreateBucketWithContext(ctx, &req)
		return f.shouldRetry(ctx, err)
	})
	var errString string
	var awsError smithy.APIError
	if err == nil {
		errString = "No Error"
	} else if errors.As(err, &awsError) {
		errString = awsError.ErrorCode()
	} else if awsErr, ok := err.(awserr.Error); ok {
		errString = awsErr.Code()
	} else {
		assert.Fail(t, "Unknown error %T %v", err, err)
	}
@@ -4,14 +4,12 @@ package s3

import (
	"context"
	"net/http"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {

@@ -56,16 +54,20 @@ func TestAWSDualStackOption(t *testing.T) {
		// test enabled
		ctx, opt, client := SetupS3Test(t)
		opt.UseDualStack = true
		s3Conn, err := s3Connection(ctx, opt, client)
		require.NoError(t, err)
		assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
		s3Conn, _, _ := s3Connection(ctx, opt, client)
		if !strings.Contains(s3Conn.Endpoint, "dualstack") {
			t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
			t.Fail()
		}
	}
	{
		// test default case
		ctx, opt, client := SetupS3Test(t)
		s3Conn, err := s3Connection(ctx, opt, client)
		require.NoError(t, err)
		assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
		s3Conn, _, _ := s3Connection(ctx, opt, client)
		if strings.Contains(s3Conn.Endpoint, "dualstack") {
			t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
			t.Fail()
		}
	}
}

@@ -2,10 +2,7 @@

package s3

import (
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
import "github.com/aws/aws-sdk-go/service/s3"

// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {

@@ -30,7 +27,6 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
	a.Name = b.Name
	a.Prefix = b.Prefix
	a.RequestCharged = b.RequestCharged
	a.ResultMetadata = b.ResultMetadata
}

// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b

@@ -45,8 +41,8 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
	a.RequestPayer = b.RequestPayer
}

// setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b
func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) {
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
	a.IsLatest = b.IsLatest
	a.Key = b.Key
	a.LastModified = b.LastModified

@@ -64,11 +60,10 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
	a.Name = b.Name
	a.Prefix = b.Prefix
	a.RequestCharged = b.RequestCharged
	a.ResultMetadata = b.ResultMetadata
}

// setFrom_typesObject_typesObjectVersion copies matching elements from a to b
func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
	a.ChecksumAlgorithm = b.ChecksumAlgorithm
	a.ETag = b.ETag
	a.Key = b.Key

@@ -76,6 +71,7 @@ func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVers
	a.Owner = b.Owner
	a.RestoreStatus = b.RestoreStatus
	a.Size = b.Size
	a.StorageClass = b.StorageClass
}

// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b

@@ -86,7 +82,6 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa
	a.ContentEncoding = b.ContentEncoding
	a.ContentLanguage = b.ContentLanguage
	a.ContentType = b.ContentType
	a.Expires = b.Expires
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode

@@ -101,9 +96,8 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa

// setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
	a.Bucket = b.Bucket
	a.Key = b.Key
	a.ACL = b.ACL
	a.Bucket = b.Bucket
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -117,6 +111,7 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
	a.GrantRead = b.GrantRead
	a.GrantReadACP = b.GrantReadACP
	a.GrantWriteACP = b.GrantWriteACP
	a.Key = b.Key
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode

@@ -137,7 +132,6 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
	a.Bucket = b.Bucket
	a.CopySource = b.CopySource
	a.Key = b.Key
	a.CopySourceIfMatch = b.CopySourceIfMatch
	a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
	a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch

@@ -147,6 +141,7 @@ func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput,
	a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
	a.ExpectedBucketOwner = b.ExpectedBucketOwner
	a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
	a.Key = b.Key
	a.RequestPayer = b.RequestPayer
	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
	a.SSECustomerKey = b.SSECustomerKey

@@ -171,7 +166,6 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
	a.ETag = b.ETag
	a.Expiration = b.Expiration
	a.Expires = b.Expires
	a.ExpiresString = b.ExpiresString
	a.LastModified = b.LastModified
	a.Metadata = b.Metadata
	a.MissingMeta = b.MissingMeta

@@ -189,14 +183,12 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
	a.StorageClass = b.StorageClass
	a.VersionId = b.VersionId
	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
	a.ResultMetadata = b.ResultMetadata
}

// setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
	a.Bucket = b.Bucket
	a.Key = b.Key
	a.ACL = b.ACL
	a.Bucket = b.Bucket
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -210,6 +202,7 @@ func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipart
	a.GrantRead = b.GrantRead
	a.GrantReadACP = b.GrantReadACP
	a.GrantWriteACP = b.GrantWriteACP
	a.Key = b.Key
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode

@@ -239,7 +232,6 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
	a.ContentLanguage = b.ContentLanguage
	a.ContentLength = b.ContentLength
	a.ContentType = b.ContentType
	a.Expires = b.Expires
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode

@@ -254,9 +246,8 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P

// setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
	a.Bucket = b.Bucket
	a.Key = b.Key
	a.ACL = b.ACL
	a.Bucket = b.Bucket
	a.BucketKeyEnabled = b.BucketKeyEnabled
	a.CacheControl = b.CacheControl
	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -270,6 +261,7 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
	a.GrantRead = b.GrantRead
	a.GrantReadACP = b.GrantReadACP
	a.GrantWriteACP = b.GrantWriteACP
	a.Key = b.Key
	a.Metadata = b.Metadata
	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
	a.ObjectLockMode = b.ObjectLockMode
@@ -3,7 +3,6 @@
package s3

import (
"context"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
@@ -11,9 +10,6 @@ import (
"sort"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// URL parameters that need to be added to the signature
@@ -40,17 +36,10 @@ var s3ParamsToSign = map[string]struct{}{
"response-content-encoding": {},
}

// Implement HTTPSignerV4 interface
type v2Signer struct {
opt *Options
}

// SignHTTP signs requests using v2 auth.
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go.
//
// Bodged up to compile with AWS SDK v2
func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
@@ -118,12 +107,11 @@ func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r

// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey))
hash := hmac.New(sha1.New, []byte(SecretKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))

// Set signature in request
req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature))
return nil
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}

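For context on the v2 scheme in the hunk above: the signature is an HMAC-SHA1 over a canonical string, base64-encoded and sent as "AWS <access>:<signature>". The following is a minimal standalone sketch, not rclone's code; the canonical string here is simplified and the header/parameter canonicalisation from the full function is omitted:

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "encoding/base64"
        "fmt"
        "net/http"
        "time"
    )

    // signV2 computes an AWS-v2-style signature over a simplified
    // canonical string: Method\nContent-MD5\nContent-Type\nDate\nPath.
    func signV2(accessKey, secretKey string, req *http.Request) {
        date := time.Now().UTC().Format(time.RFC1123)
        req.Header.Set("Date", date)
        payload := req.Method + "\n" +
            req.Header.Get("Content-MD5") + "\n" +
            req.Header.Get("Content-Type") + "\n" +
            date + "\n" +
            req.URL.Path
        mac := hmac.New(sha1.New, []byte(secretKey))
        _, _ = mac.Write([]byte(payload))
        sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
        req.Header.Set("Authorization", "AWS "+accessKey+":"+sig)
    }

    func main() {
        req, _ := http.NewRequest("GET", "https://example.com/bucket/key", nil)
        signV2("AKID", "SECRET", req) // placeholder credentials
        fmt.Println(req.Header.Get("Authorization"))
    }
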
@@ -62,7 +62,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
// This is only going to be http errors here
return "", fmt.Errorf("failed to authenticate: %w", err)
}
if len(result.Errors) > 0 {
if result.Errors != nil && len(result.Errors) > 0 {
return "", errors.New(strings.Join(result.Errors, ", "))
}
if result.Token == "" {

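The paired lines above show the same check with and without a nil guard; in Go the guard is redundant because len of a nil slice is defined to be zero. A quick standalone illustration:

    package main

    import "fmt"

    func main() {
        var errs []string // nil slice, never assigned
        fmt.Println(errs == nil, len(errs)) // true 0
        // So `len(errs) > 0` alone is a safe emptiness check;
        // `errs != nil && len(errs) > 0` is equivalent, just longer.
    }
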
@@ -344,7 +344,7 @@ cost of using more memory.
Advanced: true,
}, {
Name: "connections",
Help: strings.ReplaceAll(`Maximum number of SFTP simultaneous connections, 0 for unlimited.
Help: strings.Replace(`Maximum number of SFTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.
@@ -358,7 +358,7 @@ maximum of |--checkers| and |--transfers|.
So for |connections 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`"),
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {

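The change above swaps strings.ReplaceAll for the older strings.Replace with a count of -1; the two are equivalent (ReplaceAll was added in Go 1.12 as a shorthand). A minimal check:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s := "use |flag| and |other|"
        a := strings.ReplaceAll(s, "|", "`")
        b := strings.Replace(s, "|", "`", -1) // -1 means replace every occurrence
        fmt.Println(a == b)                   // true
    }
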
@@ -883,7 +883,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var used, objects, total int64
var total, objects int64
if f.rootContainer != "" {
var container swift.Container
err = f.pacer.Call(func() (bool, error) {
@@ -893,9 +893,8 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, fmt.Errorf("container info failed: %w", err)
}
used = container.Bytes
total = container.Bytes
objects = container.Count
total = container.QuotaBytes
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
@@ -906,19 +905,14 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, c := range containers {
used += c.Bytes
total += c.Bytes
objects += c.Count
total += c.QuotaBytes
}
}
usage = &fs.Usage{
Used: fs.NewUsageValue(used), // bytes in use
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
if total > 0 {
usage.Total = fs.NewUsageValue(total)
usage.Free = fs.NewUsageValue(total - used)
}
return usage, nil
}

@@ -1416,6 +1410,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return
}

// min returns the smallest of x, y
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}

// Get the segments for a large object
//
// It returns the names of the segments and the container that they live in

@@ -903,7 +903,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := 0; i < len(opt.Remotes)-1; i++ {
opt.Remotes[i] += ":ro"
opt.Remotes[i] = opt.Remotes[i] + ":ro"
}
opt.Upstreams = opt.Remotes
}

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"runtime"
"testing"
"time"

@@ -96,12 +95,6 @@ func TestMoveCopy(t *testing.T) {
fLocal := unionFs.upstreams[0].Fs
fMemory := unionFs.upstreams[1].Fs

if runtime.GOOS == "darwin" {
// need to disable as this test specifically tests a local that can't Copy
f.Features().Disable("Copy")
fLocal.Features().Disable("Copy")
}

t.Run("Features", func(t *testing.T) {
assert.NotNil(t, f.Features().Move)
assert.Nil(t, f.Features().Copy)

@@ -159,9 +159,7 @@ Set to 0 to disable chunked uploading.
Help: "Exclude ownCloud mounted storages",
Advanced: true,
Default: false,
},
fshttp.UnixSocketConfig,
},
}},
})
}

@@ -179,7 +177,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
ExcludeShares bool `config:"owncloud_exclude_shares"`
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
UnixSocket string `config:"unix_socket"`
}

// Fs represents a remote webdav
@@ -461,12 +458,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
precision: fs.ModTimeNotSupported,
}

var client *http.Client
if opt.UnixSocket == "" {
client = fshttp.NewClient(ctx)
} else {
client = fshttp.NewClientWithUnixSocket(ctx, opt.UnixSocket)
}
client := fshttp.NewClient(ctx)
if opt.Vendor == "sharepoint-ntlm" {
// Disable transparent HTTP/2 support as per https://golang.org/pkg/net/http/ ,
// otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED'
@@ -643,7 +635,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies(ctx)
if err != nil {
fs.Errorf(nil, "could not renew cookies: %s", err.Error())
fs.Errorf("could not renew cookies: %s", err.Error())
return
}
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
@@ -25,7 +26,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
@@ -39,8 +39,6 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second // may needs to be increased, testing needed
decayConstant = 2 // bigger for slower decay, exponential

userAgentTemplae = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}`
)

// Globals
@@ -81,22 +79,15 @@ func init() {
// it doesn't seem worth making an exception for this
Default: (encoder.Display |
encoder.EncodeInvalidUtf8),
}, {
Name: "spoof_ua",
Help: "Set the user agent to match an official version of the yandex disk client. May help with upload performance.",
Default: true,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}}...),
})
}

// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
SpoofUserAgent bool `config:"spoof_ua"`
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote yandex
@@ -263,12 +254,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}

ctx, ci := fs.AddConfig(ctx)
if fs.ConfigOptionsInfo.Get("user_agent").IsDefault() && opt.SpoofUserAgent {
randomSessionID, _ := random.Password(128)
ci.UserAgent = fmt.Sprintf(userAgentTemplae, randomSessionID)
}

token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, fmt.Errorf("couldn't read OAuth token: %w", err)
@@ -282,13 +267,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("couldn't save OAuth token: %w", err)
}
fs.Logf(nil, "Automatically upgraded OAuth config.")
log.Printf("Automatically upgraded OAuth config.")
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Yandex: %w", err)
}

ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
@@ -310,14 +296,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
//request object meta info
if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {

} else if info.ResourceType == "file" {
rootDir := path.Dir(root)
if rootDir == "." {
rootDir = ""
} else {
if info.ResourceType == "file" {
rootDir := path.Dir(root)
if rootDir == "." {
rootDir = ""
}
f.setRoot(rootDir)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
f.setRoot(rootDir)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}

@@ -2,8 +2,6 @@
package api

import (
"encoding/json"
"fmt"
"strconv"
"time"
)
@@ -14,12 +12,7 @@ type Time time.Time

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
s := string(data)
// If the time is a quoted string, strip quotes
if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
s = s[1 : len(s)-1]
}
millis, err := strconv.ParseInt(s, 10, 64)
millis, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
@@ -91,73 +84,6 @@ type ItemList struct {
Items []Item `json:"data"`
}

// UploadFileInfo is what the FileInfo field in the UnloadInfo struct decodes to
type UploadFileInfo struct {
OrgID string `json:"ORG_ID"`
ResourceID string `json:"RESOURCE_ID"`
LibraryID string `json:"LIBRARY_ID"`
Md5Checksum string `json:"MD5_CHECKSUM"`
ParentModelID string `json:"PARENT_MODEL_ID"`
ParentID string `json:"PARENT_ID"`
ResourceType int `json:"RESOURCE_TYPE"`
WmsSentTime string `json:"WMS_SENT_TIME"`
TabID string `json:"TAB_ID"`
Owner string `json:"OWNER"`
ResourceGroup string `json:"RESOURCE_GROUP"`
ParentModelName string `json:"PARENT_MODEL_NAME"`
Size int64 `json:"size"`
Operation string `json:"OPERATION"`
EventID string `json:"EVENT_ID"`
AuditInfo struct {
VersionInfo struct {
VersionAuthors []string `json:"versionAuthors"`
VersionID string `json:"versionId"`
IsMinorVersion bool `json:"isMinorVersion"`
VersionTime Time `json:"versionTime"`
VersionAuthorZuid []string `json:"versionAuthorZuid"`
VersionNotes string `json:"versionNotes"`
VersionNumber string `json:"versionNumber"`
} `json:"versionInfo"`
Resource struct {
Owner string `json:"owner"`
CreatedTime Time `json:"created_time"`
Creator string `json:"creator"`
ServiceType int `json:"service_type"`
Extension string `json:"extension"`
StatusChangeTime Time `json:"status_change_time"`
ResourceType int `json:"resource_type"`
Name string `json:"name"`
} `json:"resource"`
ParentInfo struct {
ParentName string `json:"parentName"`
ParentID string `json:"parentId"`
ParentType int `json:"parentType"`
} `json:"parentInfo"`
LibraryInfo struct {
LibraryName string `json:"libraryName"`
LibraryID string `json:"libraryId"`
LibraryType int `json:"libraryType"`
} `json:"libraryInfo"`
UpdateType string `json:"updateType"`
StatusCode string `json:"statusCode"`
} `json:"AUDIT_INFO"`
ZUID int64 `json:"ZUID"`
TeamID string `json:"TEAM_ID"`
}

// GetModTime fetches the modification time of the upload
//
// This tries a few places and if all fails returns the current time
func (ufi *UploadFileInfo) GetModTime() Time {
if t := ufi.AuditInfo.Resource.CreatedTime; !time.Time(t).IsZero() {
return t
}
if t := ufi.AuditInfo.Resource.StatusChangeTime; !time.Time(t).IsZero() {
return t
}
return Time(time.Now())
}

// UploadInfo is a simplified and slightly different version of
// the Item struct only used in the response to uploads
type UploadInfo struct {
@@ -165,21 +91,9 @@ type UploadInfo struct {
ParentID string `json:"parent_id"`
FileName string `json:"notes.txt"`
RessourceID string `json:"resource_id"`
Permalink string `json:"Permalink"`
FileInfo string `json:"File INFO"` // JSON encoded UploadFileInfo
} `json:"attributes"`
}

// GetUploadFileInfo decodes the embedded FileInfo
func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
var ufi UploadFileInfo
err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
if err != nil {
return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
}
return &ufi, nil
}

// UploadResponse is the response to a file Upload
type UploadResponse struct {
Uploads []UploadInfo `json:"data"`

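The UnmarshalJSON change above drops the quote-stripping step before parsing the millisecond timestamp. A standalone sketch (not the backend's actual types) of why the stripping matters when a server sends the number as a quoted string:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // parseMillis accepts a raw JSON number (1700000000000) or the same
    // value wrapped in quotes, as the quote-stripping version above does.
    func parseMillis(data []byte) (time.Time, error) {
        s := string(data)
        if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
            s = s[1 : len(s)-1] // strip the surrounding quotes
        }
        millis, err := strconv.ParseInt(s, 10, 64)
        if err != nil {
            return time.Time{}, err
        }
        return time.UnixMilli(millis), nil
    }

    func main() {
        t1, _ := parseMillis([]byte(`1700000000000`))
        t2, _ := parseMillis([]byte(`"1700000000000"`))
        fmt.Println(t1.Equal(t2)) // true
    }
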
@@ -677,26 +677,25 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
if len(uploadResponse.Uploads) != 1 {
return nil, errors.New("upload: invalid response")
}
upload := uploadResponse.Uploads[0]
uploadInfo, err := upload.GetUploadFileInfo()
if err != nil {
return nil, fmt.Errorf("upload error: %w", err)
// Received meta data is missing size so we have to read it again.
// It doesn't always appear on first read so try again if necessary
var info *api.Item
const maxTries = 10
sleepTime := 100 * time.Millisecond
for i := 0; i < maxTries; i++ {
info, err = f.readMetaDataForID(ctx, uploadResponse.Uploads[0].Attributes.RessourceID)
if err != nil {
return nil, err
}
if info.Attributes.StorageInfo.Size != 0 || size == 0 {
break
}
fs.Debugf(f, "Size not available yet for %q - try again in %v (try %d/%d)", name, sleepTime, i+1, maxTries)
time.Sleep(sleepTime)
sleepTime *= 2
}

// Fill in the api.Item from the api.UploadFileInfo
var info api.Item
info.ID = upload.Attributes.RessourceID
info.Attributes.Name = upload.Attributes.FileName
// info.Attributes.Type = not used
info.Attributes.IsFolder = false
// info.Attributes.CreatedTime = not used
info.Attributes.ModifiedTime = uploadInfo.GetModTime()
// info.Attributes.UploadedTime = 0 not used
info.Attributes.StorageInfo.Size = uploadInfo.Size
info.Attributes.StorageInfo.FileCount = 0
info.Attributes.StorageInfo.FolderCount = 0

return &info, nil
return info, nil
}

// Put the object into the container

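The new retry loop above polls metadata until the size appears, doubling the wait each round. A generic sketch of that pattern with a hypothetical fetch function (not the zoho API):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pollUntilReady retries fetch with exponential backoff until ready
    // reports true for the fetched value, or maxTries is exhausted.
    func pollUntilReady(fetch func() (int64, error), ready func(int64) bool) (int64, error) {
        const maxTries = 10
        sleep := 100 * time.Millisecond
        for i := 0; i < maxTries; i++ {
            v, err := fetch()
            if err != nil {
                return 0, err
            }
            if ready(v) {
                return v, nil
            }
            time.Sleep(sleep)
            sleep *= 2 // double the wait each round, as the hunk above does
        }
        return 0, errors.New("value never became ready")
    }

    func main() {
        calls := 0
        fetch := func() (int64, error) {
            calls++
            if calls < 3 {
                return 0, nil // size not visible yet
            }
            return 42, nil
        }
        size, err := pollUntilReady(fetch, func(v int64) bool { return v != 0 })
        fmt.Println(size, err) // 42 <nil>
    }
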
@@ -73,7 +73,7 @@ var osarches = []string{
"plan9/386",
"plan9/amd64",
"solaris/amd64",
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
"js/wasm",
}

// Special environment flags for a given arch

@@ -32,9 +32,6 @@ def alter_doc(backend):
    """Alter the documentation for backend"""
    rclone_bin_dir = Path(sys.path[0]).parent.absolute()
    doc_file = "docs/content/"+backend+".md"
    doc_file2 = "docs/content/"+backend+"/_index.md"
    if not os.path.exists(doc_file) and os.path.exists(doc_file2):
        doc_file = doc_file2
    if not os.path.exists(doc_file):
        raise ValueError("Didn't find doc file %s" % (doc_file,))
    new_file = doc_file+"~new~"

@@ -41,10 +41,7 @@ docs = [
    "combine.md",
    "dropbox.md",
    "filefabric.md",
    "filescom.md",
    "frostfs.md",
    "ftp.md",
    "gofile.md",
    "googlecloudstorage.md",
    "drive.md",
    "googlephotos.md",
@@ -65,14 +62,13 @@ docs = [
    "azurefiles.md",
    "onedrive.md",
    "opendrive.md",
    "oracleobjectstorage/_index.md",
    "oracleobjectstorage.md",
    "qingstor.md",
    "quatrix.md",
    "sia.md",
    "swift.md",
    "pcloud.md",
    "pikpak.md",
    "pixeldrain.md",
    "premiumizeme.md",
    "protondrive.md",
    "putio.md",
@@ -82,6 +78,7 @@ docs = [
    "smb.md",
    "storj.md",
    "sugarsync.md",
    "tardigrade.md", # stub only to redirect to storj.md
    "ulozto.md",
    "uptobox.md",
    "union.md",
@@ -159,7 +156,6 @@ def read_doc(doc):
def check_docs(docpath):
    """Check all the docs are in docpath"""
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files.update(f for f in docs if os.path.exists(os.path.join(docpath,f)))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
}
logMap = map[string]string{}
logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to parse line: %q", line)
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Failed to read version: %v", err)
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Printf("Finding commits in %v not in stable %s", semver, stable)
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {

bin/rules.go
@@ -1,51 +0,0 @@
// Ruleguard file implementing custom linting rules.
//
// Note that when used from golangci-lint (using the gocritic linter configured
// with the ruleguard check), because rule files are not handled by
// golangci-lint itself, changes will not invalidate the golangci-lint cache,
// and you must manually clean to cache (golangci-lint cache clean) for them to
// be considered, as explained here:
// https://www.quasilyte.dev/blog/post/ruleguard/#using-from-the-golangci-lint
//
// Note that this file is ignored from build with a build constraint, but using
// a different than "ignore" to avoid go mod tidy making dsl an indirect
// dependency, as explained here:
// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting

//go:build ruleguard
// +build ruleguard

// Package gorules implementing custom linting rules using ruleguard
package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// Suggest rewriting "log.(Print|Fatal|Panic)(f|ln)?" to
// "fs.(Printf|Fatalf|Panicf)", and do it if running golangci-lint with
// argument --fix. The suggestion wraps a single non-string single argument or
// variadic arguments in fmt.Sprint to be compatible with format string
// argument of fs functions.
//
// Caveats:
// - After applying the suggestions, imports may have to be fixed manually,
//   removing unused "log", adding "github.com/rclone/rclone/fs" and "fmt",
//   and if there was a variable named "fs" or "fmt" in the scope the name
//   clash must be fixed.
// - Suggested code is incorrect when within fs package itself, due to the
//   "fs."" prefix. Could handle it using condition
//   ".Where(m.File().PkgPath.Matches(`github.com/rclone/rclone/fs`))"
//   but not sure how to avoid duplicating all checks with and without this
//   condition so haven't bothered yet.
func useFsLog(m dsl.Matcher) {
    m.Match(`log.Print($x)`, `log.Println($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Log(nil, $x)`)
    m.Match(`log.Print($*args)`, `log.Println($*args)`).Suggest(`fs.Log(nil, fmt.Sprint($args))`)
    m.Match(`log.Printf($*args)`).Suggest(`fs.Logf(nil, $args)`)

    m.Match(`log.Fatal($x)`, `log.Fatalln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Fatal(nil, $x)`)
    m.Match(`log.Fatal($*args)`, `log.Fatalln($*args)`).Suggest(`fs.Fatal(nil, fmt.Sprint($args))`)
    m.Match(`log.Fatalf($*args)`).Suggest(`fs.Fatalf(nil, $args)`)

    m.Match(`log.Panic($x)`, `log.Panicln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Panic(nil, $x)`)
    m.Match(`log.Panic($*args)`, `log.Panicln($*args)`).Suggest(`fs.Panic(nil, fmt.Sprint($args))`)
    m.Match(`log.Panicf($*args)`).Suggest(`fs.Panicf(nil, $args)`)
}

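To make the deleted rules concrete, here is a hedged, self-contained sketch of the rewrite they suggested; logLike is a stand-in for fs.Log (which is not imported here), and the point is why non-string arguments get wrapped in fmt.Sprint:

    package main

    import (
        "fmt"
        "log"
    )

    // logLike mimics the fs.Log(nil, string) shape the rules rewrite to:
    // it takes a plain string, unlike log.Print's variadic ...interface{}.
    func logLike(_ interface{}, msg string) { fmt.Println(msg) }

    func main() {
        n := 3
        log.Print("copied ", n, " files") // before the suggested rewrite
        // After: non-string arguments are wrapped in fmt.Sprint so they
        // fit the string parameter of the fs logging functions.
        logLike(nil, fmt.Sprint("copied ", n, " files"))
    }
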
@@ -1,78 +0,0 @@
#!/usr/bin/env python3
"""
Test the sizes in the rclone binary of each backend by compiling
rclone with and without the backend and measuring the difference.

Run with no arguments to test all backends or a supply a list of
backends to test.
"""

all_backends = "backend/all/all.go"

# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

import os
import re
import sys
import subprocess

match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')

def read_backends():
    """
    Reads the backends file, returning a list of backends and the original file
    """
    with open(all_backends) as fd:
        orig_all = fd.read()
    # find the backends
    backends = []
    for line in orig_all.split("\n"):
        match = match_backend.search(line)
        if match:
            backends.append(match.group(1))
    return backends, orig_all

def write_all(orig_all, backend):
    """
    Write the all backends file without the backend given
    """
    with open(all_backends, "w") as fd:
        for line in orig_all.split("\n"):
            match = re.search(r'"github.com/rclone/rclone/backend/(.*?)"', line)
            # Comment out line matching backend
            if match and match.group(1) == backend:
                line = "// " + line
            fd.write(line+"\n")

def compile():
    """
    Compile the binary, returning the size
    """
    subprocess.check_call(compile_command)
    return os.stat("rclone").st_size

def main():
    # change directory to the one with this script in
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # change directory to the main rclone source
    os.chdir("..")

    to_test = sys.argv[1:]
    backends, orig_all = read_backends()
    if len(to_test) == 0:
        to_test = backends
    # Compile with all backends
    ref = compile()
    print(f"Total binary size {ref/1024/1024:.3f} MiB")
    print("Backend,Size MiB")
    for test_backend in to_test:
        write_all(orig_all, test_backend)
        new_size = compile()
        print(f"{test_backend},{(ref-new_size)/1024/1024:.3f}")
    # restore all file
    with open(all_backends, "w") as fd:
        fd.write(orig_all)

if __name__ == "__main__":
    main()

@@ -46,7 +46,8 @@ func printValue(what string, uv *int64, isSize bool) {
var commandDefinition = &cobra.Command{
Use: "about remote:",
Short: `Get quota information from the remote.`,
Long: `Prints quota information about a remote to standard
Long: `
` + "`rclone about`" + ` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from ` + "`rclone about remote:`" + ` is:

@@ -25,7 +25,8 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

@@ -31,7 +31,8 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "backend <command> remote:path [opts] <args>",
Short: `Run a backend-specific command.`,
Long: `This runs a backend-specific command. The commands themselves (except
Long: `
This runs a backend-specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.

@@ -5,13 +5,20 @@ import (
"bytes"
"log"

"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)

// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Out
logrusSave := logrus.StandardLogger().Writer()
defer func() {
err := logrusSave.Close()
if err != nil {
fs.Errorf(nil, "error closing logrusSave: %v", err)
}
}()
buf := &bytes.Buffer{}
log.SetOutput(buf)
logrus.SetOutput(buf)

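CaptureOutput above redirects both the stdlib logger and logrus into one buffer. A stdlib-only sketch of the same save/redirect/restore pattern:

    package main

    import (
        "bytes"
        "fmt"
        "log"
    )

    // captureOutput runs fun while the stdlib logger writes into a buffer,
    // then restores the previous writer and returns what was logged.
    func captureOutput(fun func()) []byte {
        save := log.Writer() // remember the current destination
        buf := &bytes.Buffer{}
        log.SetOutput(buf)
        defer log.SetOutput(save)
        fun()
        return buf.Bytes()
    }

    func main() {
        out := captureOutput(func() { log.Print("hello") })
        fmt.Printf("captured: %q\n", out)
    }
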
@@ -10,6 +10,7 @@ import (
"errors"
"flag"
"fmt"
"log"
"os"
"path"
"path/filepath"
@@ -231,7 +232,7 @@ func TestBisyncRemoteLocal(t *testing.T) {
t.Skip("path1 and path2 are the same remote")
}
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, remote, *argRemote2)
@@ -243,7 +244,7 @@ func TestBisyncLocalRemote(t *testing.T) {
t.Skip("path1 and path2 are the same remote")
}
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, *argRemote2, remote)
@@ -253,7 +254,7 @@ func TestBisyncLocalRemote(t *testing.T) {
// (useful for testing server-side copy/move)
func TestBisyncRemoteRemote(t *testing.T) {
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, remote, remote)
@@ -449,13 +450,13 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
for _, dir := range srcDirs {
dirs = append(dirs, norm.NFC.String(dir.Remote()))
}
fs.Logf(nil, "checking initFs %s", initFs)
log.Printf("checking initFs %s", initFs)
fstest.CheckListingWithPrecision(b.t, initFs, items, dirs, initFs.Precision())
checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs1, initFs, true), "setting up path1")
fs.Logf(nil, "checking Path1 %s", b.fs1)
log.Printf("checking Path1 %s", b.fs1)
fstest.CheckListingWithPrecision(b.t, b.fs1, items, dirs, b.fs1.Precision())
checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs2, initFs, true), "setting up path2")
fs.Logf(nil, "checking path2 %s", b.fs2)
log.Printf("checking path2 %s", b.fs2)
fstest.CheckListingWithPrecision(b.t, b.fs2, items, dirs, b.fs2.Precision())

// Create log file
@@ -513,21 +514,21 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
require.NoError(b.t, err, "saving log file %s", savedLog)

if b.golden && !b.stopped {
fs.Logf(nil, "Store results to golden directory")
log.Printf("Store results to golden directory")
b.storeGolden()
return
}

errorCount := 0
if b.noCompare {
fs.Logf(nil, "Skip comparing results with golden directory")
log.Printf("Skip comparing results with golden directory")
errorCount = -2
} else {
errorCount = b.compareResults()
}

if b.noCleanup {
fs.Logf(nil, "Skip cleanup")
log.Printf("Skip cleanup")
} else {
b.cleanupCase(ctx)
}
@@ -1303,7 +1304,7 @@ func touchFiles(ctx context.Context, dateStr string, f fs.Fs, dir, glob string)
return files, fmt.Errorf("invalid date %q: %w", dateStr, err)
}

matcher, firstErr := filter.GlobPathToRegexp(glob, false)
matcher, firstErr := filter.GlobToRegexp(glob, false)
if firstErr != nil {
return files, fmt.Errorf("invalid glob %q", glob)
}
@@ -1382,24 +1383,24 @@ func (b *bisyncTest) compareResults() int {
const divider = "----------------------------------------------------------"

if goldenNum != resultNum {
fs.Log(nil, divider)
fs.Log(nil, color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
fs.Logf(nil, " Golden count: %d", goldenNum)
fs.Logf(nil, " Result count: %d", resultNum)
fs.Logf(nil, " Golden files: %s", strings.Join(goldenFiles, ", "))
fs.Logf(nil, " Result files: %s", strings.Join(resultFiles, ", "))
log.Print(divider)
log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
log.Printf(" Golden count: %d", goldenNum)
log.Printf(" Result count: %d", resultNum)
log.Printf(" Golden files: %s", strings.Join(goldenFiles, ", "))
log.Printf(" Result files: %s", strings.Join(resultFiles, ", "))
}

for _, file := range goldenFiles {
if !resultSet.Has(file) {
errorCount++
fs.Logf(nil, " File found in Golden but not in Results: %s", file)
log.Printf(" File found in Golden but not in Results: %s", file)
}
}
for _, file := range resultFiles {
if !goldenSet.Has(file) {
errorCount++
fs.Logf(nil, " File found in Results but not in Golden: %s", file)
log.Printf(" File found in Results but not in Golden: %s", file)
}
}

@@ -1432,15 +1433,15 @@ func (b *bisyncTest) compareResults() int {
text, err := difflib.GetUnifiedDiffString(diff)
require.NoError(b.t, err, "diff failed")

fs.Log(nil, divider)
fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
log.Print(divider)
log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
fs.Logf(nil, "| %s", strings.TrimSpace(line))
log.Printf("| %s", strings.TrimSpace(line))
}
}

if errorCount > 0 {
fs.Log(nil, divider)
log.Print(divider)
}
if errorCount == 0 && goldenNum != resultNum {
return -1
@@ -1463,7 +1464,7 @@ func (b *bisyncTest) storeGolden() {
continue
}
if fileName == "backupdirs" {
fs.Logf(nil, "skipping: %v", fileName)
log.Printf("skipping: %v", fileName)
continue
}
goldName := b.toGolden(fileName)
@@ -1488,7 +1489,7 @@ func (b *bisyncTest) storeGolden() {
continue
}
if fileName == "backupdirs" {
fs.Logf(nil, "skipping: %v", fileName)
log.Printf("skipping: %v", fileName)
continue
}
text := b.mangleResult(b.goldenDir, fileName, true)
@@ -1741,8 +1742,8 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
b.path2, "{path2/}",
b.replaceHex(b.path1), "{path1/}",
b.replaceHex(b.path2), "{path2/}",
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path1, slash, "/"), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path2, slash, "/"), "/"), "{path2}",
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
strings.TrimSuffix(b.path2, slash), "{path2}",
b.workDir, "{workdir}",
@@ -1848,7 +1849,7 @@ func fileType(fileName string) string {
// logPrintf prints a message to stdout and to the test log
func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
line := fmt.Sprintf(text, args...)
fs.Log(nil, line)
log.Print(line)
if b.logFile != nil {
_, err := fmt.Fprintln(b.logFile, line)
require.NoError(b.t, err, "writing log file")

@@ -107,10 +107,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
}
if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Debugf(src, "%s", err.Error())
fs.Debugf(src, err.Error())
// using same error msg as CheckFn so integration tests match
err = fmt.Errorf("%v differ", hashType)
fs.Errorf(src, "%s", err.Error())
fs.Errorf(src, err.Error())
return true, false, nil
}
return false, false, nil

@@ -62,41 +62,42 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
b.setHashType(ci)
}

// Checks and Warnings
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) ///nolint:govet
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
ci.CheckSum = false
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
} else if b.opt.Compare.Checksum && !ci.CheckSum {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
}
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
}
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
b.opt.Compare.Modtime = true
b.opt.Compare.Size = true
ci.CheckSum = false
b.opt.Compare.Checksum = false
} else {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
// note: --checksum will still affect the internal sync calls
}
}
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
b.opt.IgnoreListingChecksum = true
}
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
}

notSupported := func(label string, value bool, opt *bool) {
if value {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
*opt = false
}
}
@@ -123,13 +124,13 @@ func sizeDiffers(a, b int64) bool {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
}
return false
}
if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false
}
}
@@ -151,7 +152,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
return
}
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
b.opt.Compare.SlowHashSyncOnly = false
b.opt.Compare.NoSlowHash = true
ci.CheckSum = false
@@ -159,7 +160,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}

if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
b.opt.Compare.Modtime = true
@@ -167,25 +168,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
ci.CheckSum = false
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
if b.opt.Compare.HashType1 != hash.None {
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
}
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
if b.opt.Compare.HashType2 != hash.None {
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
}
}
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
b.opt.Compare.Checksum = false
ci.CheckSum = false
b.opt.IgnoreListingChecksum = true
@@ -232,7 +233,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
b.opt.Compare.Checksum = true
CompareFlag.Checksum = true
default:
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
}
}

@@ -284,14 +285,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
}
if o.Size() < 0 {
downloadHashWarn.Do(func() {
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}

firstDownloadHash.Do(func() {
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
defer func() {

@@ -190,48 +190,50 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
ds.deleted++
d |= deltaDeleted
} else if !now.isDir(file) {
} else {
// skip dirs here, as we only care if they are new/deleted, not newer/older
whatchanged := []string{}
if b.opt.Compare.Size {
if sizeDiffers(old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
if now.getSize(file) > old.getSize(file) {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
d |= deltaLarger
} else {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
d |= deltaSmaller
if !now.isDir(file) {
whatchanged := []string{}
if b.opt.Compare.Size {
if sizeDiffers(old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
if now.getSize(file) > old.getSize(file) {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
d |= deltaLarger
} else {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
d |= deltaSmaller
}
s = now.getSize(file)
}
s = now.getSize(file)
}
}
if b.opt.Compare.Modtime {
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
d |= deltaOlder
if b.opt.Compare.Modtime {
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
d |= deltaOlder
}
t = now.getTime(file)
}
t = now.getTime(file)
}
}
if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
h = now.getHash(file)
if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
h = now.getHash(file)
}
}
// concat changes and print log
if d.is(deltaModified) {
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
b.indent(msg, file, summary)
}
}
// concat changes and print log
if d.is(deltaModified) {
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
b.indent(msg, file, summary)
}
}

@@ -43,10 +43,8 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
const timeFormat = "2006-01-02T15:04:05.000000000-0700"

// TZ defines time zone used in listings
var (
TZ = time.UTC
tzLocal = false
)
var TZ = time.UTC
var tzLocal = false

// fileInfo describes a file
type fileInfo struct {
@@ -85,7 +83,7 @@ func (ls *fileList) has(file string) bool {
}
_, found := ls.info[file]
if !found {
// try unquoting
//try unquoting
file, _ = strconv.Unquote(`"` + file + `"`)
_, found = ls.info[file]
}
@@ -95,7 +93,7 @@ func (ls *fileList) has(file string) bool {
func (ls *fileList) get(file string) *fileInfo {
info, found := ls.info[file]
if !found {
// try unquoting
//try unquoting
file, _ = strconv.Unquote(`"` + file + `"`)
info = ls.info[fmt.Sprint(file)]
}
@@ -422,7 +420,7 @@ func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {

func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
var fulllisting *fileList
dirsonly := newFileList()
var dirsonly = newFileList()
var err error

if !b.opt.CreateEmptySrcDirs {
@@ -452,6 +450,24 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
return dirsonly, err
}

// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
// Need to use the other fs's precision (if lower) when copying
// Note: we need to use Truncate rather than Round so that After() is reliable.
// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC)
func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
DestPrecision := dst.Precision()

// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
DestPrecision = fs.UnWrapFs(dst).Precision()
}

if Modtime.After(Modtime.Truncate(DestPrecision)) {
return Modtime.Truncate(DestPrecision)
}
return Modtime
}

// modifyListing will modify the listing based on the results of the sync
func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
queue := queues.copy2to1
@@ -517,13 +533,13 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res

// build src winners list
if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
srcWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
}

// build dst winners list
if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
dstWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
}

@@ -607,7 +623,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
}
if srcNewName != "" { // if it was renamed and not deleted
srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
}
if srcNewName != srcOldName {
srcList.remove(srcOldName)

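The ConvertPrecision helper added above uses Truncate rather than Round, for the reason given in its comment: rounding can push a time forward past the original, making After() claim the copy is newer. A standalone illustration using the timestamp from the comment:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        precise := time.Date(2023, 11, 2, 20, 22, 45, 552679442, time.UTC)
        prec := time.Millisecond // the destination's precision

        rounded := precise.Round(prec)      // rounds up to ...45.553
        truncated := precise.Truncate(prec) // always <= the original, ...45.552

        fmt.Println(rounded.After(precise))   // true: the rounded time looks newer
        fmt.Println(truncated.After(precise)) // false: safe for comparisons
    }
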
@@ -39,8 +39,8 @@ func (b *bisyncRun) indent(tag, file, msg string) {
tag = Color(terminal.BlueFg, tag)
}
msg = Color(terminal.MagentaFg, msg)
msg = strings.ReplaceAll(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"))
msg = strings.ReplaceAll(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"))
msg = strings.Replace(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"), -1)
msg = strings.Replace(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"), -1)
file = Color(terminal.CyanFg, escapePath(file, false))
logf(nil, "- %-18s%-43s - %s", tag, msg, file)
}
@@ -66,8 +66,7 @@ func quotePath(path string) string {
return escapePath(path, true)
}

// Colors controls whether terminal colors are enabled
var Colors bool
var Colors bool // Colors controls whether terminal colors are enabled

// Color handles terminal colors for bisync
func Color(style string, s string) string {

@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
|||
finaliseOnce.Do(func() {
|
||||
if atexit.Signalled() {
|
||||
if b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
|
||||
b.InGracefulShutdown = true
|
||||
if b.SyncCI != nil {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
|
||||
b.SyncCI.MaxTransfer = 1
|
||||
b.SyncCI.MaxDuration = 1 * time.Second
|
||||
b.SyncCI.CutoffMode = fs.CutoffModeSoft
|
||||
gracePeriod := 30 * time.Second // TODO: flag to customize this?
|
||||
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
|
||||
b.CancelSync()
|
||||
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
|
||||
}
|
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
				// we haven't started to sync yet, so we're good.
				// no need to worry about the listing files, as we haven't overwritten them yet.
				b.CleanupCompleted = true
-				fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
+				fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
			}
		}
		if !b.CleanupCompleted {
			if !b.opt.Resync {
-				fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
-				fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
+				fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
+				fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
			}
			markFailed(b.listing1)
			markFailed(b.listing2)
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
			b.critical = false
		}
		if err == nil {
-			fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
+			fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
		}
	}

	if b.critical {
		if b.retryable && b.opt.Resilient {
-			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
-			fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
+			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
+			fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
		} else {
			if bilib.FileExists(b.listing1) {
				_ = os.Rename(b.listing1, b.listing1+"-err")
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
				_ = os.Rename(b.listing2, b.listing2+"-err")
			}
			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
-			fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
+			fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
		}
		return ErrBisyncAborted
	}
	if b.abort && !b.InGracefulShutdown {
-		fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
+		fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
	}
	if err == nil {
-		fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
+		fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
	}
	return err
}
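The hunks above implement a three-way outcome: critical errors rename the listings aside and force --resync, unless --resilient marks them retryable. A condensed sketch of that classification (hypothetical types, not the actual rclone API):

package main

import "fmt"

// runState is a hypothetical, condensed stand-in for the fields used above.
type runState struct {
	critical  bool // listings are suspect
	retryable bool // error is safe to retry as-is
	resilient bool // --resilient was set
}

func (s runState) outcome() string {
	if !s.critical {
		return "ok"
	}
	if s.retryable && s.resilient {
		return "aborted: retryable without --resync"
	}
	return "aborted: must run --resync to recover"
}

func main() {
	fmt.Println(runState{}.outcome())
	fmt.Println(runState{critical: true, retryable: true, resilient: true}.outcome())
	fmt.Println(runState{critical: true}.outcome())
}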
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
		errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
		errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
-		fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
+		fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
		if opt.CheckSync != CheckSyncFalse {
			// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
			fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
				b.retryable = true
				return err
			}
-			fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
+			fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid."))
		}
		b.revertToOldListings()
	} else {
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	fs.Infof(nil, "Building Path1 and Path2 listings")
	ls1, ls2, err = b.makeMarchListing(fctx)
	if err != nil || accounting.Stats(fctx).Errored() {
-		fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
+		fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
		b.critical = true
		b.retryable = true
		return err
@@ -476,8 +476,10 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
		if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
			b.indent("ERROR", file, "Path1 file not found in Path2")
			ok = false
-		} else if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
-			ok = false
+		} else {
+			if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
+				ok = false
+			}
		}
	}
	for _, file := range files2.list {
@@ -567,7 +569,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont

func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
	if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
-		err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
+		err = fmt.Errorf(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
		return err
	}
	// need to test our BackupDirs too, as sync will be fooled by our --files-from filters
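An aside on the errors.New to fmt.Errorf change just above: fmt.Errorf treats its first argument as a printf format string, so a non-constant (or %-containing) message is exactly what go vet's printf check flags, which is why so many lines in this diff carry //nolint:govet. A standalone illustration, not from the diff:

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "paths 100% overlapping"
	fmt.Println(errors.New(msg)) // paths 100% overlapping
	//nolint:govet // deliberately misusing a message as a format string
	fmt.Println(fmt.Errorf(msg)) // paths 100%!o(MISSING)verlapping
}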
|
||||
|
@ -623,7 +625,7 @@ func (b *bisyncRun) checkSyntax() error {
|
|||
|
||||
func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
|
||||
if b.DebugName != "" && b.DebugName == nametocheck {
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
	prettyprint(result, "writing result", fs.LogLevelDebug)
	if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
		once.Do(func() {
-			fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
+			fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
		})
	}
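The once.Do wrapper above is the standard log-once idiom: however many results trip the condition, sync.Once runs the warning callback a single time. A self-contained sketch, not from the diff:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	sizes := []int64{-1, 100, -1, -1} // -1: size unknown

	for _, size := range sizes {
		if size < 0 {
			// Do runs the callback only on the first call; later calls are no-ops.
			once.Do(func() {
				fmt.Println("warning: files of unknown size may not sync reliably")
			})
		}
	}
}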