Compare commits

...

55 commits

Author SHA1 Message Date
Nick Craig-Wood
cc0e304251 vfs: fix open files disappearing from directory listings
In this commit

aaadb48d48 vfs: keep virtual directory status accurate and reduce deadlock potential

We reworked the virtual directory detection to use an atomic bool so
that we could run part of the cache forgetting only with a read lock.

Unfortunately this had a bug which meant that directories with virtual
items could be forgotten.

This commit changes the boolean into a count of virtual entries which
should be more accurate.

Fixes #8082
2024-10-01 15:51:05 +01:00
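
A minimal sketch of the approach this commit message describes, using hypothetical names (`Dir`, `AddVirtual`, `RemoveVirtual`, `hasVirtual`) rather than the real VFS types: a per-directory count of virtual entries can be decremented safely under concurrency, whereas a single bool could be cleared while virtual items were still present.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Dir is a stand-in for a directory cache entry; the names here are
// illustrative, not rclone's actual VFS types.
type Dir struct {
	virtuals atomic.Int32 // count of virtual entries, replacing a single bool
}

// AddVirtual records one more virtual entry in the directory.
func (d *Dir) AddVirtual() { d.virtuals.Add(1) }

// RemoveVirtual records that one virtual entry has been resolved.
func (d *Dir) RemoveVirtual() { d.virtuals.Add(-1) }

// hasVirtual reports whether any virtual entries remain, so cache
// forgetting can skip directories that still hold them.
func (d *Dir) hasVirtual() bool { return d.virtuals.Load() > 0 }

func main() {
	var d Dir
	d.AddVirtual()
	d.AddVirtual()
	d.RemoveVirtual()
	fmt.Println(d.hasVirtual()) // true - one virtual entry still present
}
```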
Nick Craig-Wood
c053429b9c onedrive: fix time precision for OneDrive personal
This reduces the precision advertised by the backend from 1ms to 1s
for OneDrive personal accounts.

The precision was set to 1ms as part of:

1473de3f04 onedrive: add metadata support

which was released in v1.66.0.

However, it appears that not all OneDrive personal accounts support 1ms time
precision, and that Microsoft may be migrating accounts away from this
to backends which only support 1s precision.

Fixes #8101
2024-09-30 11:34:06 +01:00
Nick Craig-Wood
18989fbf85 Add RcloneView as a sponsor 2024-09-30 11:34:06 +01:00
Nick Craig-Wood
a7451c6a77 Add Leandro Piccilli to contributors 2024-09-30 11:32:13 +01:00
nielash
5147d1101c cache: skip bisync tests
per ncw: "I don't care about cache as it is deprecated - we should probably stop
it running bisync tests"
https://github.com/rclone/rclone/pull/7795#issuecomment-2163295857
2024-09-29 18:37:52 -04:00
nielash
11ad2a1316 bisync: allow blank hashes on tests
Some backends support hashes but allow them to be blank. In other words, we
can't expect them to be reliably non-blank, and we shouldn't treat a blank hash
as an error.

Before this change, the bisync integration tests errored if a backend said it
supported hashes but in fact sometimes lacked them. After this change, such
errors are ignored.
2024-09-29 18:37:52 -04:00
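
As a rough illustration of the policy above (not the bisync test code itself), a comparison helper can treat a blank hash on either side as "unknown" rather than as a mismatch:

```go
package main

import "fmt"

// hashesDiffer reports a mismatch only when both hashes are present.
// A blank hash on either side is treated as "unknown" rather than as
// an error, matching the behaviour described in the commit above.
func hashesDiffer(srcHash, dstHash string) bool {
	if srcHash == "" || dstHash == "" {
		return false // can't compare, so don't flag an error
	}
	return srcHash != dstHash
}

func main() {
	fmt.Println(hashesDiffer("abc123", ""))       // false - blank hash ignored
	fmt.Println(hashesDiffer("abc123", "abc123")) // false - equal
	fmt.Println(hashesDiffer("abc123", "def456")) // true  - genuine mismatch
}
```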
nielash
3c7ad8d961 box: fix server-side copying a file over existing dst - fixes #3511
Before this change, server-side copying a src file over a dst that already exists
gave `Error "item_name_in_use" (409): Item with the same name already exists`.

This change fixes the error by copying to a temporary name first, then moving it
to the real name.

There might be a more graceful way to overwrite a file during a copy, but I
didn't see one in the API docs.
https://developer.box.com/reference/post-files-id-copy/
In the meantime, this workaround is better than a critical error.

This should (hopefully) fix 8 bisync integration tests.
2024-09-29 18:37:52 -04:00
nielash
a3e8fb584a sync: add tests for copying/moving a file over itself
This should catch issues like this, for example:
https://github.com/rclone/rclone/issues/3511#issuecomment-528332895
2024-09-29 18:37:52 -04:00
nielash
9b4b3033da fs/cache: fix parent not getting pinned when remote is a file
Before this change, when cache.GetFn was called on a file rather than a
directory, two cache entries would be added (the file + its parent) but only one
of them would get pinned if the caller then called Pin(f). This left the other
one exposed to expiration if the ci.FsCacheExpireDuration was reached. This was
problematic because both entries point to the same Fs, and if one entry expires
while the other is pinned, the Shutdown method gets erroneously called on an Fs
that is still in use.

An example of the problem showed up in the Hasher backend, which uses the
Shutdown method to stop the bolt db used to store hashes. If a command was run
on a Hasher file (ex. `rclone md5sum --download hasher:somelargefile.zip`) and
hashing the file took longer than the --fs-cache-expire-duration (5m by default), the
bolt db was stopped before the hashing operation completed, resulting in an
error.

This change fixes the issue by ensuring that:
1. only one entry is added to the cache (the file's parent, not the file).
2. future lookups correctly find the entry regardless of whether they are called
	with the parent name or one of its children.
3. fs.ErrorIsFile is returned when (and only when) fsString points to a file
	(preserving the fix from 8d5bc7f28b).

Note that f.Root() should always point to the parent dir as of c69eb84573
2024-09-28 13:49:56 +01:00
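
The commit message above describes collapsing two cache entries into one keyed by the parent directory. The sketch below illustrates only that keying idea, with hypothetical helpers (`canonicalKey` and a toy `isFile` check); the real logic lives in rclone's fs/cache package.

```go
package main

import (
	"fmt"
	"path"
)

// canonicalKey maps a remote string to the single cache key used for it.
// If fsString points at a file, the key is its parent directory, so a
// lookup via "remote:dir/file.zip" and one via "remote:dir" share the
// same entry - and the same pin. isFile is a toy stand-in for the real
// "does this point at a file?" check inside fs/cache.
func canonicalKey(fsString string, isFile func(string) bool) string {
	if isFile(fsString) {
		return path.Dir(fsString)
	}
	return fsString
}

func main() {
	isFile := func(s string) bool { return path.Ext(s) != "" } // toy heuristic
	fmt.Println(canonicalKey("remote:dir/file.zip", isFile)) // remote:dir
	fmt.Println(canonicalKey("remote:dir", isFile))          // remote:dir
}
```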
Leandro Piccilli
94997d25d2 gcs: add access token auth with --gcs-access-token 2024-09-27 17:37:07 +01:00
Nick Craig-Wood
19458e8459 accounting: write the current bwlimit to the log on SIGUSR2 2024-09-26 18:01:18 +01:00
Nick Craig-Wood
7d32da441e accounting: fix wrong message on SIGUSR2 to enable/disable bwlimit
This was caused by the message code only looking at one of the
bandwidth filters, not all of them.

Fixes #8104
2024-09-26 17:53:58 +01:00
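
A small self-contained sketch in the spirit of the two accounting commits above: a SIGUSR2 handler that toggles a bandwidth limit and logs the current state. The limit handling and log text are illustrative stand-ins, not rclone's accounting package.

```go
//go:build unix

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	limited := true
	limit := "10M" // illustrative limit value

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGUSR2)

	// Toggle the limit on every SIGUSR2 and log the current state.
	for range sigs {
		limited = !limited
		if limited {
			fmt.Printf("bandwidth limit enabled: %s\n", limit)
		} else {
			fmt.Println("bandwidth limit disabled")
		}
	}
}
```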
Nick Craig-Wood
22e13eea47 gphotos: implement --gphotos-proxy to allow download of full resolution media
This works in conjunction with the gphotosdl tool

https://github.com/rclone/gphotosdl
2024-09-26 12:57:28 +01:00
Nick Craig-Wood
de9b593f02 googlephotos: remove noisy debugging statements 2024-09-26 12:52:53 +01:00
Nick Craig-Wood
b2b4f8196c docs: add note to CONTRIBUTING that the overview needs editing in 2 places 2024-09-25 17:56:33 +01:00
Nick Craig-Wood
84cebb6872 test_all: add ignoretests parameter for skipping certain tests
Use like this for a `backend:` in `config.yaml`

   ignoretests:
     - "fs/operations"
     - "fs/sync"
2024-09-25 16:03:43 +01:00
Nick Craig-Wood
cb9f4f8461 build: replace "golang.org/x/exp/slices" with "slices" now go1.21 is required 2024-09-25 16:03:43 +01:00
Nick Craig-Wood
498d9cfa85 Changelog updates from Version v1.68.1 2024-09-24 17:26:49 +01:00
Dan McArdle
109e4ed0ed Makefile: Fail when doc recipes create dir named '$HOME'
This commit makes the `commanddocs` and `backenddocs` recipes fail if they
accidentally create a directory named '$HOME'. This is basically a
regression test for issue #8092.

It also makes those recipes rmdir the '$HOME/.config/rclone/'
directories. This will only delete empty directories, so nothing of
value should ever be deleted.
2024-09-24 10:38:25 +01:00
Dan McArdle
353270263a Makefile: Prevent doc recipe from creating dir named '$HOME'
Prior to this commit, running `make doc` had the unwanted side effect of
creating a directory literally named `$HOME` in the source tree.

Fixed #8092
2024-09-24 10:38:25 +01:00
wiserain
f8d782c02d
pikpak: fix cid/gcid calculations for fs.OverrideRemote
Previously, cid/gcid (custom hash for pikpak) calculations failed when 
attempting to unwrap object info from `fs.OverrideRemote`. 

This commit introduces a new function that can correctly unwrap
object info from both regular objects and `fs.OverrideRemote` types,
ensuring accurate cid/gcid calculations for uploads in all scenarios.
2024-09-21 10:22:31 +09:00
albertony
3dec664a19 bisync: change exit code from 2 to 7 for critically aborted run 2024-09-20 18:51:08 +02:00
albertony
a849fd59f0 cmd: change exit code from 1 to 2 for syntax and usage errors 2024-09-20 18:51:08 +02:00
nielash
462a1cf491 local: fix --copy-links on macOS when cloning
Before this change, --copy-links erroneously behaved like --links when using cloning
on macOS, and cloning was not supported at all when using --links.

After this change, --copy-links does what it's supposed to, and takes advantage of
cloning when possible, by copying the file being linked to instead of the link
itself.

Cloning is now also supported in --links mode for regular files (which benefit
most from cloning). symlinks in --links mode continue to be tossed back to be
handled by rclone's special translation logic.

See https://forum.rclone.org/t/macos-local-to-local-copy-with-copy-links-causes-error/47671/5?u=nielash
2024-09-20 17:43:52 +01:00
Nick Craig-Wood
0b7b3cacdc azureblob: add --azureblob-use-az to force the use of the Azure CLI for auth
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.

Fixes #8078
2024-09-20 16:16:09 +01:00
Nick Craig-Wood
976103d50b azureblob: add --azureblob-disable-instance-discovery
If set, this skips requesting Microsoft Entra instance metadata

See #8078
2024-09-20 16:16:09 +01:00
Nick Craig-Wood
192524c004 s3: add initial --s3-directory-bucket to support AWS Directory Buckets
This will ensure no Content-Md5 headers are sent and ensure ETags are not
interpreted as MD5 sums. X-Amz-Meta-Md5chksum will be set on all objects
whether single or multipart uploaded.

This also sets "no_check_bucket = true".

This is enough to make the integration tests pass, but there are some
limitations as noted in the docs.

See: https://forum.rclone.org/t/support-s3-directory-bucket/47653/
2024-09-19 12:01:24 +01:00
Nick Craig-Wood
28667f58bf Add Lawrence Murray to contributors 2024-09-19 12:01:24 +01:00
Lawrence Murray
c669f4e218
backend/protondrive: improve performance of Proton Drive backend
This change removes redundant calls to the Proton Drive Bridge when
creating Objects. Specifically, the function List() would get a
directory listing, get a link for each file, construct a remote path
from that link, then get a link for that remote path again by calling
getObjectLink() unnecessarily. This change removes that unnecessary
call, and tidies up a couple of functions around this with unused
parameters.

Related to performance issues reported in #7322 and #7413
2024-09-18 18:15:24 +01:00
Nick Craig-Wood
1a9e6a527d ftp: implement --ftp-no-check-upload to allow upload to write only dirs
Fixes #8079
2024-09-18 12:57:01 +01:00
Nick Craig-Wood
8c48cadd9c docs: document that fusermount3 may be needed when mounting/unmounting
See: https://forum.rclone.org/t/documentation-fusermount-vs-fusermount3/47816/
2024-09-18 12:57:01 +01:00
Nick Craig-Wood
76e1ba8c46 Add rishi.sridhar to contributors 2024-09-18 12:57:01 +01:00
Nick Craig-Wood
232e4cd18f Add quiescens to contributors 2024-09-18 12:57:00 +01:00
buengese
88141928f2 docs/zoho: update options 2024-09-17 20:40:42 +01:00
buengese
a2a0388036 zoho: make upload cutoff configurable 2024-09-17 20:40:42 +01:00
buengese
48543d38e8 zoho: add support for private spaces 2024-09-17 20:40:42 +01:00
buengese
eceb390152 zoho: try to handle rate limits a bit better 2024-09-17 20:40:42 +01:00
buengese
f4deffdc96 zoho: print clear error message when missing oauth scope 2024-09-17 20:40:42 +01:00
buengese
c172742cef zoho: switch to large file upload API for larger files, fix missing URL encoding of filenames for the upload API 2024-09-17 20:40:42 +01:00
buengese
7daed30754 zoho: use download server to accelerate downloads
Co-authored-by: rishi.sridhar <rishi.sridhar@zohocorp.com>
2024-09-17 20:40:42 +01:00
quiescens
b1b4c7f27b
opendrive: add about support to backend 2024-09-17 17:20:42 +01:00
wiserain
ed84553dc1
pikpak: fix login issue where token retrieval fails
This addresses the login issue caused by pikpak's recent cancellation 
of existing login methods and requirement for additional verifications. 

To resolve this, we've made the following changes:

1. Similar to lib/oauthutil, we've integrated a mechanism to handle 
captcha tokens.

2. A new pikpakClient has been introduced to wrap the existing 
rest.Client and incorporate the necessary headers including 
x-captcha-token for each request.

3. Several options have been added/removed to support persistent 
user/client identification.

* client_id: No longer configurable.
* client_secret: Deprecated as it's no longer used.
* user_agent: A new option that defaults to PC/Firefox's user agent 
but can be overridden using the --pikpak-user-agent flag.
* device_id: A new option that is randomly generated if invalid. 
It is recommended not to delete or change it frequently.
* captcha_token: A new option that is automatically managed 
by rclone, similar to the OAuth token.

Fixes #7950 #8005
2024-09-18 01:09:21 +09:00
Nick Craig-Wood
c94edbb76b webdav: nextcloud: implement backoff and retry for 423 LOCKED errors
When uploading chunked files to Nextcloud, the server returns a 423 error
while it is merging the chunks.

This waits for an exponentially increasing amount of time for it to
clear.

If we receive a 404 error after having received a 423 error, we assume
all is good, as this is what appears to happen in practice.

Fixes #7109
2024-09-17 16:46:02 +01:00
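
A self-contained sketch of the retry policy described above: back off exponentially on 423 responses and treat a subsequent 404 as success. The `check` callback and the limits are illustrative; this is not the actual webdav backend code.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// waitForUnlock polls check() until the resource is no longer locked,
// sleeping for an exponentially increasing interval between attempts.
// A 404 received after a 423 is treated as success, as described in the
// commit message above.
func waitForUnlock(check func() int, maxTries int) error {
	delay := 100 * time.Millisecond
	seenLocked := false
	for i := 0; i < maxTries; i++ {
		switch status := check(); {
		case status == http.StatusLocked: // 423 - server still merging chunks
			seenLocked = true
		case status == http.StatusNotFound && seenLocked: // 404 after 423 - assume all is well
			return nil
		case status >= 200 && status < 300:
			return nil
		default:
			return fmt.Errorf("unexpected status %d", status)
		}
		time.Sleep(delay)
		delay *= 2
	}
	return errors.New("resource still locked after retries")
}

func main() {
	responses := []int{423, 423, 404}
	i := 0
	check := func() int { s := responses[i]; i++; return s }
	fmt.Println(waitForUnlock(check, 5)) // <nil>
}
```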
Nick Craig-Wood
2dcb327bc0 s3: fix rclone ignoring static credentials when env_auth=true
The SDKv2 conversion introduced a regression to do with setting
credentials with env_auth=true. The rclone documentation explicitly
states that env_auth only applies if secret_access_key and
access_key_id are blank, and users had been relying on that.

However after the SDKv2 conversion we were ignoring static credentials
if env_auth=true.

This fixes the problem by ignoring env_auth=true if secret_access_key
and access_key_id are both provided. This brings rclone back into line
with the documentation and users' expectations.

Fixes #8067
2024-09-17 16:07:56 +01:00
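
A small sketch of the precedence rule this commit restores, using a struct whose field names mirror the config keys (it is not the s3 backend's real options type): explicit static keys always win, and env_auth only applies when both are blank.

```go
package main

import "fmt"

// options mirrors the relevant rclone config keys for illustration only.
type options struct {
	EnvAuth         bool
	AccessKeyID     string
	SecretAccessKey string
}

// credentialSource applies the rule from the commit message: explicit
// static keys always win, and env_auth only applies when both are blank.
func credentialSource(opt options) string {
	if opt.AccessKeyID != "" && opt.SecretAccessKey != "" {
		return "static" // use the configured keys, even if env_auth=true
	}
	if opt.EnvAuth {
		return "environment" // SDK default chain: env vars, IAM role, etc.
	}
	return "anonymous"
}

func main() {
	fmt.Println(credentialSource(options{EnvAuth: true, AccessKeyID: "AKIA...", SecretAccessKey: "secret"})) // static
	fmt.Println(credentialSource(options{EnvAuth: true}))                                                    // environment
}
```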
Nick Craig-Wood
874d66658e fs: fix setting stringArray config values from environment variables
After the config re-organisation, the setting of stringArray config
values (eg `--exclude` set with `RCLONE_EXCLUDE`) was broken and gave
a message like this for `RCLONE_EXCLUDE=*.jpg`:

    Failed to load "filter" default values: failed to initialise "filter" options:
    couldn't parse config item "exclude" = "*.jpg" as []string: parsing "*.jpg" as []string failed:
    invalid character '/' looking for beginning of value

This was caused by the parser trying to parse the input string as a
JSON value.

When the config was re-organised, it was thought that the internal
representation of stringArray values was not important as it was never
visible externally; however, this turned out not to be true.

A defined representation was chosen - a comma-separated string - and
this was documented and tests were introduced in this patch.

This potentially introduces a very small backwards incompatibility. In
rclone v1.67.0

    RCLONE_EXCLUDE=a,b

Would be interpreted as

    --exclude "a,b"

Whereas this new code will interpret it as

    --exclude "a" --exclude "b"

The benefit of being able to set multiple values with an environment
variable was deemed to outweigh the very small backwards compatibility
risk.

If a value with a `,` is needed, then use CSV escaping, eg

    RCLONE_EXCLUDE="a,b"

(Note that this needs to include the quotes, so at the unix shell that would be

    RCLONE_EXCLUDE='"a,b"'

Fixes #8063
2024-09-13 15:52:51 +01:00
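
A sketch of the comma-separated representation with CSV escaping described above, using the standard encoding/csv package; it illustrates the documented behaviour rather than rclone's actual parser.

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// parseStringArray splits an environment variable value into a []string
// using CSV rules, so `a,b` becomes two values while `"a,b"` stays one.
func parseStringArray(value string) ([]string, error) {
	if value == "" {
		return nil, nil
	}
	return csv.NewReader(strings.NewReader(value)).Read()
}

func main() {
	v, _ := parseStringArray(`*.jpg`)
	fmt.Println(v) // [*.jpg]
	v, _ = parseStringArray(`a,b`)
	fmt.Println(v) // [a b]  - two --exclude values
	v, _ = parseStringArray(`"a,b"`)
	fmt.Println(v) // [a,b]  - CSV escaping keeps the comma
}
```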
Nick Craig-Wood
3af757e26d rc: fix default value of --metrics-addr
Before this fix it was the empty string, which isn't a good default for a
stringArray.
2024-09-13 15:52:51 +01:00
Nick Craig-Wood
fef1b61585 fs: fix --dump filters not always appearing
Before this fix, we initialised the options blocks in a random order.
This meant that there was a 50/50 chance of whether --dump filters would
show the filters or not, as it depended on the "main" block having
been read first to set the Dump flags.

This change initialises the options blocks in a defined order - alphabetical,
but with "main" first - which fixes the problem.
2024-09-13 15:52:51 +01:00
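
A tiny sketch of the deterministic ordering described above, with purely illustrative block names: sort alphabetically, then move "main" to the front so its flags (such as --dump) are set before the other blocks read them.

```go
package main

import (
	"fmt"
	"sort"
)

// orderBlocks returns option block names alphabetically but with "main"
// first, mirroring the initialisation order described in the commit
// message. The block names here are purely illustrative.
func orderBlocks(names []string) []string {
	sort.Strings(names)
	for i, name := range names {
		if name == "main" {
			copy(names[1:i+1], names[:i]) // shift earlier names right
			names[0] = "main"
			break
		}
	}
	return names
}

func main() {
	fmt.Println(orderBlocks([]string{"vfs", "filter", "main", "log"}))
	// [main filter log vfs]
}
```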
Nick Craig-Wood
3fca7a60a5 docs: correct notes on docker manual build 2024-09-13 15:52:51 +01:00
Nick Craig-Wood
6b3f41fa0c Add ttionya to contributors 2024-09-13 15:52:51 +01:00
ttionya
3d0ee47aa2
build: fix docker release build - fixes #8062
This updates the action to use `docker/build-push-action` instead of `ilteoood/docker_buildx`,
which fixes the build problem in testing.
2024-09-12 17:57:53 +01:00
Pawel Palucha
da70088b11 docs: add section for improving performance for s3 2024-09-12 11:29:35 +01:00
Nick Craig-Wood
1bc9b94cf2 onedrive: fix spurious "Couldn't decode error response: EOF" DEBUG
This DEBUG was being generated on redirects, which don't have a JSON
body, and is irrelevant.
2024-09-10 16:19:38 +01:00
Nick Craig-Wood
15a026d3be Add Divyam to contributors 2024-09-10 16:19:38 +01:00
Divyam
ad122c6f6f
serve docker: add missing vfs-read-chunk-streams option in docker volume driver 2024-09-09 10:07:25 +01:00
Nick Craig-Wood
b155231cdd Start v1.69.0-DEV development 2024-09-08 17:22:19 +01:00
68 changed files with 1790 additions and 326 deletions

View file

@ -32,15 +32,27 @@ jobs:
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'

View file

@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation

View file

@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
rcdocs: rclone
bin/make_rc_docs.sh

View file

@ -168,6 +168,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

View file

@ -1 +1 @@
v1.68.0
v1.69.0

View file

@ -209,6 +209,22 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
keys instead of setting ` + "`service_principal_file`" + `.
`,
Advanced: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_msi",
Help: `Use a managed service identity to authenticate (only works in Azure).
@ -243,6 +259,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@ -438,10 +468,12 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
@ -725,7 +757,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@ -875,6 +908,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.Account != "":
// Anonymous access
anonymous = true

View file

@ -43,6 +43,7 @@ import (
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
@ -256,7 +257,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
// fmt.Printf("...Error %v\n", err)
return "", err
}
// fmt.Printf("...Id %q\n", *info.Id)
@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// check if dest already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if item != nil { // dest already exists, need to copy to temp name and then move
tempSuffix := "-rclone-copy-" + random.String(8)
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
if err != nil {
return nil, err
}
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
err = f.deleteObject(ctx, item.ID)
if err != nil {
return nil, err
}
return f.Move(ctx, tempObj, remote)
}
// Copy the object
opts := rest.Opts{
Method: "POST",

View file

@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
Example:
myUser:myPass@localhost:9005
`,
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "no_check_upload",
Default: false,
Help: `Don't check the upload is OK
Normally rclone will try to check the upload exists after it has
uploaded a file to make sure the size and modification time are as
expected.
This flag stops rclone doing these checks. This enables uploading to
folders which are write only.
You will likely need to use the --inplace flag also if uploading to
a write only folder.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
@ -232,6 +248,7 @@ type Options struct {
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if o.fs.opt.NoCheckUpload {
o.info = &FileInfo{
Name: o.remote,
Size: uint64(src.Size()),
ModTime: src.ModTime(ctx),
precise: true,
IsDir: false,
}
return nil
}
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}

View file

@ -60,16 +60,14 @@ const (
minSleep = 10 * time.Millisecond
)
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Description of how to auth for this app
var storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Register with Fs
func init() {
@ -106,6 +104,12 @@ func init() {
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "access_token",
Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Sensitive: true,
Advanced: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@ -379,6 +383,7 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote storage server
@ -535,6 +540,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else if opt.AccessToken != "" {
ts := oauth2.Token{AccessToken: opt.AccessToken}
oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@ -944,7 +952,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist

View file

@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@ -160,6 +159,34 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: "proxy",
Default: "",
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you use the gphotosdl proxy then you can download original,
unchanged images.
This runs a headless browser in the background.
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
First run with
gphotosdl -login
Then once you have logged into google photos close the browser window
and run
gphotosdl
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
rclone use the proxy.
`, "|", "`"),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@ -181,6 +208,7 @@ type Options struct {
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
Proxy string `config:"proxy"`
}
// Fs represents a remote storage server
@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
defer log.Trace(f, "remote=%q", remote)("")
// defer log.Trace(f, "remote=%q", remote)("")
return f.newObjectWithInfo(ctx, remote, nil)
}
@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil || pattern.isFile {
return nil, fs.ErrorDirNotFound
@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
defer log.Trace(f, "src=%+v", src)("")
// defer log.Trace(f, "src=%+v", src)("")
// Temporary Object under construction
o := &Object{
fs: f,
@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
defer log.Trace(f, "dir=%q")("err=%v", &err)
// defer log.Trace(f, "dir=%q")("err=%v", &err)
match, _, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
defer log.Trace(o, "")("")
// defer log.Trace(o, "")("")
if !o.fs.opt.ReadSize || o.bytes >= 0 {
return o.bytes
}
@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
defer log.Trace(o, "")("")
// defer log.Trace(o, "")("")
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@ -965,16 +993,20 @@ func (o *Object) downloadURL() string {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
defer log.Trace(o, "")("")
// defer log.Trace(o, "")("")
err = o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
return nil, err
}
url := o.downloadURL()
if o.fs.opt.Proxy != "" {
url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.downloadURL(),
RootURL: url,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil || !pattern.isFile || !pattern.canUpload {
return errCantUpload

View file

@ -6,6 +6,7 @@ package local
import (
"context"
"fmt"
"path/filepath"
"runtime"
"github.com/go-darwin/apfs"
@ -22,7 +23,7 @@ import (
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
if runtime.GOOS != "darwin" || f.opt.NoClone {
return nil, fs.ErrorCantCopy
}
srcObj, ok := src.(*Object)
@ -30,6 +31,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't clone - not same remote type")
return nil, fs.ErrorCantCopy
}
if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
return nil, fs.ErrorCantCopy
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
@ -44,11 +48,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
err = Clone(srcObj.path, f.localPath(remote))
srcPath := srcObj.path
if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
srcPath, err = filepath.EvalSymlinks(srcPath)
if err != nil {
return nil, err
}
}
err = Clone(srcPath, f.localPath(remote))
if err != nil {
return nil, err
}
fs.Debugf(remote, "server-side cloned!")
// Set metadata if --metadata is in use
if meta != nil {

View file

@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}
// Test corrupted on transfer
@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
b := bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
@ -395,7 +394,6 @@ func TestMetadata(t *testing.T) {
assert.Equal(t, "wedges", m["potato"])
}
})
}
func TestFilter(t *testing.T) {
@ -572,4 +570,35 @@ func TestCopySymlink(t *testing.T) {
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
// Set fs into "-L/--copy-links" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst2"))
// Do copy from src into dst
src, err = f.NewObject(ctx, "src/link.txt")
require.NoError(t, err)
require.NotNil(t, src)
dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a NON-symlink and it has the right contents
dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
fi, err := os.Lstat(dstPath)
require.NoError(t, err)
assert.True(t, fi.Mode()&os.ModeSymlink == 0)
want := fstest.NewItem("dst2/link.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
// Test that copying a normal file also works
dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
require.NoError(t, err)
require.NotNil(t, dst)
want = fstest.NewItem("dst2/file.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}

View file

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"net/http"
"slices"
"strings"
"time"
@ -14,7 +15,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)
const (

View file

@ -942,7 +942,8 @@ func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
// Redirects have no body so don't report an error
if err != nil && resp.Header.Get("Location") == "" {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {
@ -1544,9 +1545,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
if f.driveType == driveTypePersonal {
return time.Millisecond
}
// While this is true for some OneDrive personal accounts, it
// isn't true for all of them. See #8101 for details
//
// if f.driveType == driveTypePersonal {
// return time.Millisecond
// }
return time.Second
}

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"slices"
"testing"
"time"
@ -16,7 +17,6 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v

View file

@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var uInfo usersInfoResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/users/info.json/" + f.session.SessionID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
usage = &fs.Usage{
Used: fs.NewUsageValue(uInfo.StorageUsed),
Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
}
return usage, nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
@ -1147,6 +1173,7 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)

View file

@ -231,3 +231,10 @@ type permissions struct {
type uploadFileChunkReply struct {
TotalWritten int64 `json:"TotalWritten"`
}
// usersInfoResponse describes OpenDrive users/info.json response
type usersInfoResponse struct {
// This response contains many other values but these are the only ones currently in use
StorageUsed int64 `json:"StorageUsed,string"`
MaxStorage int64 `json:"MaxStorage,string"`
}

View file

@ -513,6 +513,72 @@ type RequestDecompress struct {
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------ authorization
// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
CaptchaToken string `json:"captcha_token"`
ExpiresIn int64 `json:"expires_in"` // currently 300s
// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
Expiry time.Time `json:"expiry,omitempty"`
URL string `json:"url,omitempty"` // a link for users to solve captcha
}
// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
if t.Expiry.IsZero() {
return false
}
expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}
// Valid reports whether t is non-nil, has a CaptchaToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
return t != nil && t.CaptchaToken != "" && !t.expired()
}
// CaptchaTokenRequest is to request for captcha token
type CaptchaTokenRequest struct {
Action string `json:"action,omitempty"`
CaptchaToken string `json:"captcha_token,omitempty"`
ClientID string `json:"client_id,omitempty"`
DeviceID string `json:"device_id,omitempty"`
Meta *CaptchaTokenMeta `json:"meta,omitempty"`
}
// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
CaptchaSign string `json:"captcha_sign,omitempty"`
ClientVersion string `json:"client_version,omitempty"`
PackageName string `json:"package_name,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
UserID string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
UserName string `json:"username,omitempty"`
Email string `json:"email,omitempty"`
PhoneNumber string `json:"phone_number,omitempty"`
}
// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
TokenType string `json:"token_type"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
Sub string `json:"sub"`
}
// Expiry returns expiry from expires in, so it should be called on retrieval
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
// ------------------------------------------------------------
// NOT implemented YET

View file

@ -3,8 +3,10 @@ package pikpak
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@ -14,10 +16,13 @@ import (
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
@ -262,15 +267,20 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
if err != nil {
return
}
if src.Size() == 0 {
// If src is zero-length, the API will return
// Error "cid and file_size is required" (400)
// In this case, we can simply return cid == gcid
return cid, nil
}
params := url.Values{}
params.Set("cid", cid)
params.Set("file_size", strconv.FormatInt(src.Size(), 10))
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
}
info := struct {
@ -368,11 +378,23 @@ func calcGcid(r io.Reader, size int64) (string, error) {
return hex.EncodeToString(totalHash.Sum(nil)), nil
}
// unWrapObjectInfo returns the underlying Object unwrapped as much as
// possible or nil even if it is an OverrideRemote
func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
if o, ok := oi.(fs.Object); ok {
return fs.UnWrapObject(o)
} else if do, ok := oi.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
return do.UnWrap()
}
return nil
}
// calcCid calculates Cid from source
//
// Cid is a simplified version of Gcid
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
srcObj := fs.UnWrapObjectInfo(src)
srcObj := unWrapObjectInfo(src)
if srcObj == nil {
return "", fmt.Errorf("failed to unwrap object from src: %s", src)
}
@ -408,6 +430,8 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
return
}
// ------------------------------------------------------------ authorization
// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation
@ -428,3 +452,206 @@ func genDeviceID() string {
}
return string(base)
}
var md5Salt = []string{
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
func calcCaptchaSign(deviceID string) (timestamp, sign string) {
timestamp = fmt.Sprint(time.Now().UnixMilli())
str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
for _, salt := range md5Salt {
str = md5Sum(str + salt)
}
sign = "1." + str
return
}
func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
req = &api.CaptchaTokenRequest{
Action: action,
CaptchaToken: oldToken, // can be empty initially
ClientID: clientID,
DeviceID: opt.DeviceID,
Meta: new(api.CaptchaTokenMeta),
}
switch action {
case "POST:/v1/auth/signin":
req.Meta.UserName = opt.Username
default:
timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
req.Meta.CaptchaSign = captchaSign
req.Meta.Timestamp = timestamp
req.Meta.ClientVersion = clientVersion
req.Meta.PackageName = packageName
req.Meta.UserID = opt.UserID
}
return
}
// CaptchaTokenSource stores updated captcha tokens in the config file
type CaptchaTokenSource struct {
mu sync.Mutex
m configmap.Mapper
opt *Options
token *api.CaptchaToken
ctx context.Context
rst *pikpakClient
}
// initialize CaptchaTokenSource from rclone.conf if possible
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
token := new(api.CaptchaToken)
tokenString, ok := m.Get("captcha_token")
if !ok || tokenString == "" {
fs.Debugf(nil, "failed to read captcha token out of config file")
} else {
if err := json.Unmarshal([]byte(tokenString), token); err != nil {
fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
}
}
return &CaptchaTokenSource{
m: m,
opt: opt,
token: token,
ctx: ctx,
rst: newPikpakClient(getClient(ctx, opt), opt),
}
}
// requestToken retrieves captcha token from API
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
}
var info *api.CaptchaToken
_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
if err == nil && info.ExpiresIn != 0 {
// populate to Expiry
info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
cts.token = info // update with a new one
}
return
}
func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
oldToken := ""
if cts.token != nil {
oldToken = cts.token.CaptchaToken
}
action := "GET:/drive/v1/about"
if opts.RootURL == "" && opts.Path != "" {
action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
} else if u, err := url.Parse(opts.RootURL); err == nil {
action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
}
req := newCaptchaTokenRequest(action, oldToken, cts.opt)
if err := cts.requestToken(cts.ctx, req); err != nil {
return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
}
// put it into rclone.conf
tokenBytes, err := json.Marshal(cts.token)
if err != nil {
return "", fmt.Errorf("failed to marshal captcha token: %w", err)
}
cts.m.Set("captcha_token", string(tokenBytes))
return cts.token.CaptchaToken, nil
}
// Invalidate resets existing captcha token for a forced refresh
func (cts *CaptchaTokenSource) Invalidate() {
cts.mu.Lock()
cts.token.CaptchaToken = ""
cts.mu.Unlock()
}
// Token returns a valid captcha token
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
cts.mu.Lock()
defer cts.mu.Unlock()
if cts.token.Valid() {
return cts.token.CaptchaToken, nil
}
return cts.refreshToken(opts)
}
// pikpakClient wraps rest.Client with a handle of captcha token
type pikpakClient struct {
opt *Options
client *rest.Client
captcha *CaptchaTokenSource
}
// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
// * error handler
// * root url
// * default headers
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
for key, val := range map[string]string{
"Referer": "https://mypikpak.com/",
"x-client-id": clientID,
"x-client-version": clientVersion,
"x-device-id": opt.DeviceID,
// "x-device-model": "firefox%2F129.0",
// "x-device-name": "PC-Firefox",
// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
// "x-net-work-type": "NONE",
// "x-os-version": "Win32",
// "x-platform-version": "1",
// "x-protocol-version": "301",
// "x-provider-name": "NONE",
// "x-sdk-version": "8.0.3",
} {
client.SetHeader(key, val)
}
return &pikpakClient{
client: client,
opt: opt,
}
}
// This should be called right after pikpakClient initialized
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
return c
}
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {
return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
}
if opts.ExtraHeaders == nil {
opts.ExtraHeaders = make(map[string]string)
}
opts.ExtraHeaders["x-captcha-token"] = token
}
return c.client.CallJSON(ctx, opts, request, response)
}
func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
return c.client.Call(ctx, opts)
}

View file

@ -23,6 +23,7 @@ package pikpak
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@ -51,6 +52,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
@ -64,15 +66,17 @@ import (
// Constants
const (
rcloneClientID = "YNxT9w7GMdWvEOKa"
rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
clientID = "YUMx5nI8ZU8Ap8pm"
clientVersion = "2.0.0"
packageName = "mypikpak.com"
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
)
// Globals
@ -85,43 +89,53 @@ var (
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
ClientID: clientID,
RedirectURL: oauthutil.RedirectURL,
}
)
// Returns OAuthOptions modified for pikpak
func pikpakOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Advanced = true
} else if opt.Name == config.ConfigClientSecret {
opt.Advanced = true
}
opts = append(opts, opt)
}
return opts
}
// pikpakAuthorize retrieves an OAuth token using user/pass and saves it to rclone.conf
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
// override default client id/secret
if id, ok := m.Get("client_id"); ok && id != "" {
oauthConfig.ClientID = id
}
if secret, ok := m.Get("client_secret"); ok && secret != "" {
oauthConfig.ClientSecret = secret
if opt.Username == "" {
return errors.New("no username")
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
}
t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
// new device id if necessary
if len(opt.DeviceID) != 32 {
opt.DeviceID = genDeviceID()
m.Set("device_id", opt.DeviceID)
fs.Infof(nil, "Using new device id %q", opt.DeviceID)
}
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/auth/signin",
}
req := map[string]string{
"username": opt.Username,
"password": pass,
"client_id": clientID,
}
var token api.Token
rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
_, err = rst.CallJSON(ctx, &opts, req, &token)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
rst.captcha.Invalidate()
_, err = rst.CallJSON(ctx, &opts, req, &token)
}
}
if err != nil {
return fmt.Errorf("failed to retrieve token using username/password: %w", err)
}
t := &oauth2.Token{
AccessToken: token.AccessToken,
TokenType: token.TokenType,
RefreshToken: token.RefreshToken,
Expiry: token.Expiry(),
}
return oauthutil.PutToken(name, m, t, false)
}
@ -160,7 +174,7 @@ func init() {
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(pikpakOAuthOptions(), []fs.Option{{
Options: []fs.Option{{
Name: "user",
Help: "Pikpak username.",
Required: true,
@ -170,6 +184,18 @@ func init() {
Help: "Pikpak password.",
Required: true,
IsPassword: true,
}, {
Name: "device_id",
Help: "Device ID used for authorization.",
Advanced: true,
Sensitive: true,
}, {
Name: "user_agent",
Default: defaultUserAgent,
Advanced: true,
Help: fmt.Sprintf(`HTTP user agent for pikpak.
Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
@ -248,7 +274,7 @@ this may help to speed up the transfers.`,
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@ -256,6 +282,9 @@ this may help to speed up the transfers.`,
type Options struct {
Username string `config:"user"`
Password string `config:"pass"`
UserID string `config:"user_id"` // only available during runtime
DeviceID string `config:"device_id"`
UserAgent string `config:"user_agent"`
RootFolderID string `config:"root_folder_id"`
UseTrash bool `config:"use_trash"`
TrashedOnly bool `config:"trashed_only"`
@ -271,11 +300,10 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
rst *rest.Client // the connection to the server
rst *pikpakClient // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
rootFolderID string // the id of the root folder
deviceID string // device id used for api requests
client *http.Client // authorized client
m configmap.Mapper
tokenMu *sync.Mutex // when renewing tokens
@ -429,6 +457,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
} else if apiErr.Reason == "file_space_not_enough" {
// "file_space_not_enough" (8): Storage space is not enough
return false, fserrors.FatalError(err)
} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
// "captcha_invalid" (9): Verification code is invalid
// This error occurred on the POST:/drive/v1/files endpoint
// when a zero-byte file was uploaded with an invalid captcha token
f.rst.captcha.Invalidate()
return true, err
}
}
@ -452,13 +486,36 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
// Override a few config settings and create a client
newCtx, ci := fs.AddConfig(ctx)
ci.UserAgent = opt.UserAgent
return fshttp.NewClient(newCtx)
}
// newClientWithPacer sets a new http/rest client with a pacer to Fs
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
var ts *oauthutil.TokenSource
f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
if err != nil {
return fmt.Errorf("failed to create oauth client: %w", err)
}
f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
token, err := ts.Token()
if err != nil {
return err
}
// parse user_id from oauth access token for later use
if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
info := struct {
UserID string `json:"sub,omitempty"`
}{}
if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
f.opt.UserID = info.UserID
}
}
f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
return nil
}
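For reference, here is a minimal self-contained sketch of the same claim extraction performed in the hunk above: reading the unverified "sub" claim out of a JWT access token. It uses RawURLEncoding instead of the manual "===" padding trick; everything other than the "sub" claim name is illustrative.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// subFromJWT extracts the "sub" claim from a JWT without verifying the
// signature, mirroring the user_id parsing in newClientWithPacer above.
func subFromJWT(token string) (string, error) {
	parts := strings.Split(token, ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("token is not a JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return "", err
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	// Payload {"sub":"12345"} encoded as an unsigned example token.
	tok := "eyJhbGciOiJub25lIn0." + base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"12345"}`)) + "."
	sub, err := subFromJWT(tok)
	fmt.Println(sub, err) // 12345 <nil>
}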
@ -491,10 +548,19 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
CanHaveEmptyDirectories: true, // can have empty directories
NoMultiThreading: true, // can't have multiple threads downloading
}).Fill(ctx, f)
f.deviceID = genDeviceID()
// new device id if necessary
if len(f.opt.DeviceID) != 32 {
f.opt.DeviceID = genDeviceID()
m.Set("device_id", f.opt.DeviceID)
fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
}
if err := f.newClientWithPacer(ctx); err != nil {
return nil, err
// re-authorize if necessary
if strings.Contains(err.Error(), "invalid_grant") {
return f, f.reAuthorize(ctx)
}
}
return f, nil
@ -1707,7 +1773,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
gcid, err := o.fs.getGcid(ctx, src)
if err != nil || gcid == "" {
fs.Debugf(o, "calculating gcid: %v", err)
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
// No buffering; directly calculate gcid from source
rc, err := srcObj.Open(ctx)
if err != nil {

View file

@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithLink(ctx, remote, nil)
_, err := tempF.newObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithLink(ctx, remote, nil)
return f.newObject(ctx, remote)
}
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
return link, nil
}
// readMetaDataForRemote reads the metadata from the remote
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
link, err := f.getObjectLink(ctx, remote)
if err != nil {
return nil, nil, err
}
// readMetaDataForLink reads the metadata from the remote
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
var err error
if err = f.pacer.Call(func() (bool, error) {
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
return shouldRetry(ctx, err)
}); err != nil {
return nil, nil, err
return nil, err
}
return link, fileSystemAttrs, nil
return fileSystemAttrs, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
// Return an Object from a path and link
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
if o.link != nil {
return nil
}
link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
if err != nil {
return err
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
o.id = link.LinkID
@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
o.mimetype = link.MIMEType
o.link = link
fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
if err != nil {
return nil, err
}
if fileSystemAttrs != nil {
o.modTime = fileSystemAttrs.ModificationTime
o.originalSize = &fileSystemAttrs.Size
@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
o.digests = &fileSystemAttrs.Digests
}
return nil
return o, nil
}
// Return an Object from a path
// Return an Object from a path only
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.readMetaData(ctx, link)
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
link, err := f.getObjectLink(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
return f.newObjectWithLink(ctx, remote, link)
}
// List the objects and directories in dir into entries. The

View file

@ -2606,6 +2606,35 @@ knows about - please make a bug report if not.
`,
Default: fs.Tristate{},
Advanced: true,
}, {
Name: "directory_bucket",
Help: strings.ReplaceAll(`Set to use AWS Directory Buckets
If you are using an AWS Directory Bucket then set this flag.
This will ensure no |Content-Md5| headers are sent and ensure |ETag|
headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
be set on all objects whether single or multipart uploaded.
This also sets |no_check_bucket = true|.
Note that Directory Buckets do not support:
- Versioning
- |Content-Encoding: gzip|
Rclone limitations with Directory Buckets:
- rclone does not support creating Directory Buckets with |rclone mkdir|
- ... or removing them with |rclone rmdir| yet
- Directory Buckets do not appear when doing |rclone lsf| at the top level.
- Rclone can't remove auto created directories yet. In theory this should
work with |directory_markers = true| but it doesn't.
- Directories don't seem to appear in recursive (ListR) listings.
`, "|", "`"),
Default: false,
Advanced: true,
Provider: "AWS",
}, {
Name: "sdk_log_mode",
Help: strings.ReplaceAll(`Set to debug the SDK
@ -2780,6 +2809,7 @@ type Options struct {
UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
SDKLogMode sdkLogMode `config:"sdk_log_mode"`
DirectoryBucket bool `config:"directory_bucket"`
}
// Fs represents a remote s3 server
@ -3052,9 +3082,16 @@ func (s3logger) Logf(classification logging.Classification, format string, v ...
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
ci := fs.GetConfig(ctx)
var awsConfig aws.Config
// Make the default static auth
v := aws.Credentials{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
// Try to fill in the config from the environment if env_auth=true
if opt.EnvAuth {
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
configOpts := []func(*awsconfig.LoadOptions) error{}
// Set the name of the profile if supplied
if opt.Profile != "" {
@ -3079,13 +3116,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
case opt.SecretAccessKey == "":
return nil, errors.New("secret_access_key not found")
default:
// Make the static auth
v := aws.Credentials{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
// static credentials are already set
}
}
@ -3547,6 +3578,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// MD5 digest of their object data.
f.etagIsNotMD5 = true
}
if opt.DirectoryBucket {
// Objects uploaded to directory buckets appear to have random ETags
//
// This doesn't appear to be documented
f.etagIsNotMD5 = true
// The normal API doesn't work for creating directory buckets, so don't try
f.opt.NoCheckBucket = true
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
@ -6028,6 +6067,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
}
if w.f.opt.DirectoryBucket {
// Directory buckets do not support "Content-Md5" header
uploadPartReq.ContentMD5 = nil
}
var uout *s3.UploadPartOutput
err = w.f.pacer.Call(func() (bool, error) {
// rewind the reader on retry and after reading md5
@ -6304,7 +6347,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the Etag is not an MD5, eg when using SSE/SSE-C
// - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
// provided checksums aren't disabled
ui.req.Metadata[metaMD5Hash] = md5sumBase64
}
@ -6319,7 +6362,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
if size >= 0 {
ui.req.ContentLength = &size
}
if md5sumBase64 != "" {
if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
ui.req.ContentMD5 = &md5sumBase64
}
if o.fs.opt.RequesterPays {

View file

@ -14,21 +14,30 @@ import (
"io"
"net/http"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
// Not found. Can be returned by NextCloud when merging chunks of an upload.
if resp != nil && resp.StatusCode == 404 {
if *wasLocked {
// Assume a 404 error after we've received a 423 error is actually a success
return false, nil
}
return true, err
}
// 423 LOCKED
if resp != nil && resp.StatusCode == 423 {
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
*wasLocked = true
fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
time.Sleep(*sleepTime)
*sleepTime *= 2
return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
}
return f.shouldRetry(ctx, resp, err)
@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
}
opts.ExtraHeaders = o.extraHeaders(ctx, src)
opts.ExtraHeaders["Destination"] = destinationURL.String()
sleepTime := 5 * time.Second
wasLocked := false
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
})
if err != nil {
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)

View file

@ -27,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
return nil
}
// User is a Zoho user we are only interested in the ZUID here
type User struct {
// OAuthUser is a Zoho user we are only interested in the ZUID here
type OAuthUser struct {
FirstName string `json:"First_Name"`
Email string `json:"Email"`
LastName string `json:"Last_Name"`
@ -36,12 +36,41 @@ type User struct {
ZUID int64 `json:"ZUID"`
}
// TeamWorkspace represents a Zoho Team or workspace
// UserInfoResponse is returned by the user info API.
type UserInfoResponse struct {
Data struct {
ID string `json:"id"`
Type string `json:"users"`
Attributes struct {
EmailID string `json:"email_id"`
Edition string `json:"edition"`
} `json:"attributes"`
} `json:"data"`
}
// PrivateSpaceInfo gives basic information about a user's private folder.
type PrivateSpaceInfo struct {
Data struct {
ID string `json:"id"`
Type string `json:"string"`
} `json:"data"`
}
// CurrentTeamInfo gives information about the current user in a team.
type CurrentTeamInfo struct {
Data struct {
ID string `json:"id"`
Type string `json:"string"`
}
}
// TeamWorkspace represents a Zoho Team, Workspace or Private Space
// It's actually a VERY large json object that differs between
// Team and Workspace but we are only interested in some fields
// that both of them have so we can use the same struct for both
// Team and Workspace and Private Space but we are only interested in some fields
// that all of them have so we can use the same struct.
type TeamWorkspace struct {
ID string `json:"id"`
Type string `json:"type"`
Attributes struct {
Name string `json:"name"`
Created Time `json:"created_time_in_millisecond"`
@ -49,7 +78,8 @@ type TeamWorkspace struct {
} `json:"attributes"`
}
// TeamWorkspaceResponse is the response by the list teams api
// TeamWorkspaceResponse is the response by the list teams API, list workspace API
// or list team private spaces API.
type TeamWorkspaceResponse struct {
TeamWorkspace []TeamWorkspace `json:"data"`
}
@ -180,11 +210,38 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
return &ufi, nil
}
// LargeUploadInfo is once again a slightly different version of UploadInfo
// returned as part of a LargeUploadResponse by the large file upload API.
type LargeUploadInfo struct {
Attributes struct {
ParentID string `json:"parent_id"`
FileName string `json:"file_name"`
RessourceID string `json:"resource_id"`
FileInfo string `json:"file_info"`
} `json:"attributes"`
}
// GetUploadFileInfo decodes the embedded FileInfo
func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
var ufi UploadFileInfo
err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
if err != nil {
return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
}
return &ufi, nil
}
// UploadResponse is the response to a file Upload
type UploadResponse struct {
Uploads []UploadInfo `json:"data"`
}
// LargeUploadResponse is the response returned by large file upload API.
type LargeUploadResponse struct {
Uploads []LargeUploadInfo `json:"data"`
Status string `json:"status"`
}
// WriteMetadataRequest is used to write metadata for a
// single item
type WriteMetadataRequest struct {

View file

@ -14,6 +14,7 @@ import (
"strings"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
@ -36,9 +37,11 @@ const (
rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
maxSleep = 60 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
configRootID = "root_folder_id"
defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
)
// Globals
@ -50,6 +53,7 @@ var (
"WorkDrive.team.READ",
"WorkDrive.workspace.READ",
"WorkDrive.files.ALL",
"ZohoFiles.files.ALL",
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
@ -61,6 +65,8 @@ var (
RedirectURL: oauthutil.RedirectLocalhostURL,
}
rootURL = "https://workdrive.zoho.eu/api/v1"
downloadURL = "https://download.zoho.eu/v1/workdrive"
uploadURL = "http://upload.zoho.eu/workdrive-api/v1/"
accountsURL = "https://accounts.zoho.eu"
)
@ -79,7 +85,7 @@ func init() {
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
}
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
@ -88,12 +94,12 @@ func init() {
switch config.State {
case "":
return oauthutil.ConfigOut("teams", &oauthutil.Options{
return oauthutil.ConfigOut("type", &oauthutil.Options{
OAuth2Config: oauthConfig,
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
})
case "teams":
case "type":
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
@ -108,24 +114,43 @@ func init() {
}
}
authSrv, apiSrv, err := getSrvs()
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
// Get the user Info
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
userInfo, err := getUserInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
// If personal Edition only one private Space is available. Directly configure that.
if userInfo.Data.Attributes.Edition == "PERSONAL" {
return fs.ConfigResult("private_space", userInfo.Data.ID)
}
// Otherwise go to team selection
return fs.ConfigResult("team", userInfo.Data.ID)
case "private_space":
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
workspace := workspaces[i]
return workspace.ID, workspace.Name
})
case "team":
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
// Get the teams
teams, err := listTeams(ctx, user.ZUID, apiSrv)
teams, err := listTeams(ctx, config.Result, apiSrv)
if err != nil {
return nil, err
}
@ -143,9 +168,19 @@ func init() {
if err != nil {
return nil, err
}
currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
if err != nil {
return nil, err
}
privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
if err != nil {
return nil, err
}
workspaces = append(workspaces, privateSpaces...)
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
workspace := workspaces[i]
return workspace.ID, workspace.Attributes.Name
return workspace.ID, workspace.Name
})
case "workspace_end":
workspaceID := config.Result
@ -179,7 +214,13 @@ browser.`,
}, {
Value: "com.au",
Help: "Australia",
}}}, {
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to large file upload api (>= 10 MiB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@ -193,6 +234,7 @@ browser.`,
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
RootFolderID string `config:"root_folder_id"`
Region string `config:"region"`
Enc encoder.MultiEncoder `config:"encoding"`
@ -200,13 +242,15 @@ type Options struct {
// Fs represents a remote workdrive
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
downloadsrv *rest.Client // the connection to the download server
uploadsrv *rest.Client // the connection to the upload server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
}
// Object describes a Zoho WorkDrive object
@ -229,6 +273,8 @@ func setupRegion(m configmap.Mapper) error {
return errors.New("no region set")
}
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
@ -237,11 +283,63 @@ func setupRegion(m configmap.Mapper) error {
// ------------------------------------------------------------
func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
type workspaceInfo struct {
ID string
Name string
}
func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
var userInfo api.UserInfoResponse
opts := rest.Opts{
Method: "GET",
Path: "/users/me",
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
}
_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
if err != nil {
return nil, err
}
return &userInfo, nil
}
func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
var currentTeamInfo api.CurrentTeamInfo
opts := rest.Opts{
Method: "GET",
Path: "/teams/" + teamID + "/currentuser",
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
}
_, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo)
if err != nil {
return nil, err
}
return &currentTeamInfo, err
}
func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
var privateSpaceListResponse api.TeamWorkspaceResponse
opts := rest.Opts{
Method: "GET",
Path: "/users/" + teamUserID + "/privatespace",
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
}
_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
if err != nil {
return nil, err
}
workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
for _, workspace := range privateSpaceListResponse.TeamWorkspace {
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
}
return workspaceList, err
}
func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
var teamList api.TeamWorkspaceResponse
opts := rest.Opts{
Method: "GET",
Path: "/users/" + strconv.FormatInt(uid, 10) + "/teams",
Path: "/users/" + zuid + "/teams",
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
}
_, err := srv.CallJSON(ctx, &opts, nil, &teamList)
@ -251,18 +349,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWork
return teamList.TeamWorkspace, nil
}
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
var workspaceList api.TeamWorkspaceResponse
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
var workspaceListResponse api.TeamWorkspaceResponse
opts := rest.Opts{
Method: "GET",
Path: "/teams/" + teamID + "/workspaces",
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
}
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
if err != nil {
return nil, err
}
return workspaceList.TeamWorkspace, nil
workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
for _, workspace := range workspaceListResponse.TeamWorkspace {
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
}
return workspaceList, nil
}
// --------------------------------------------------------------
@ -285,13 +389,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
}
authRetry := false
// Bail out early if we are missing OAuth Scopes.
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
return false, err
}
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
if resp != nil && resp.StatusCode == 429 {
fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
time.Sleep(60 * time.Second)
err = pacer.RetryAfterError(err, 60*time.Second)
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
return true, err
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@ -389,6 +500,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err := configstruct.Set(m, opt); err != nil {
return nil, err
}
if opt.UploadCutoff < defaultUploadCutoff {
return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
}
err := setupRegion(m)
if err != nil {
return nil, err
@ -401,11 +517,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
uploadsrv: rest.NewClient(oAuthClient).SetRoot(uploadURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@ -643,9 +761,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
return
}
func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
opts := rest.Opts{
Method: "POST",
Path: "/stream/upload",
Body: in,
ContentLength: &size,
ContentType: "application/octet-stream",
Options: options,
ExtraHeaders: map[string]string{
"x-filename": url.QueryEscape(name),
"x-parent_id": parent,
"override-name-exist": "true",
"upload-id": uuid.New().String(),
"x-streammode": "1",
},
}
var err error
var resp *http.Response
var uploadResponse *api.LargeUploadResponse
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("upload large error: %v", err)
}
if len(uploadResponse.Uploads) != 1 {
return nil, errors.New("upload: invalid response")
}
upload := uploadResponse.Uploads[0]
uploadInfo, err := upload.GetUploadFileInfo()
if err != nil {
return nil, fmt.Errorf("upload error: %w", err)
}
// Fill in the api.Item from the api.UploadFileInfo
var info api.Item
info.ID = upload.Attributes.RessourceID
info.Attributes.Name = upload.Attributes.FileName
// info.Attributes.Type = not used
info.Attributes.IsFolder = false
// info.Attributes.CreatedTime = not used
info.Attributes.ModifiedTime = uploadInfo.GetModTime()
// info.Attributes.UploadedTime = 0 not used
info.Attributes.StorageInfo.Size = uploadInfo.Size
info.Attributes.StorageInfo.FileCount = 0
info.Attributes.StorageInfo.FolderCount = 0
return &info, nil
}
func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
params := url.Values{}
params.Set("filename", name)
params.Set("filename", url.QueryEscape(name))
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
@ -705,21 +875,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
size := src.Size()
remote := src.Remote()
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
size := src.Size()
remote := src.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
// use normal upload API for small sizes (<10MiB)
if size < int64(f.opt.UploadCutoff) {
info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
}
// large file API otherwise
info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
default:
return nil, err
}
// Upload the file
info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
}
// Mkdir creates the container if it doesn't exist
@ -1159,7 +1348,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.downloadsrv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@ -1183,11 +1372,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Overwrite the old file
info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
// use normal upload API for small sizes (<10MiB)
if size < int64(o.fs.opt.UploadCutoff) {
info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
if err != nil {
return err
}
return o.setMetaData(info)
}
// large file API otherwise
info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
if err != nil {
return err
}
return o.setMetaData(info)
}

View file

@ -11,7 +11,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestZoho:",
NilObject: (*zoho.Object)(nil),
RemoteName: "TestZoho:",
SkipInvalidUTF8: true,
NilObject: (*zoho.Object)(nil),
})
}

View file

@ -21,12 +21,12 @@ def find_backends():
def output_docs(backend, out, cwd):
"""Output documentation for backend options to out"""
out.flush()
subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)
def output_backend_tool_docs(backend, out, cwd):
"""Output documentation for backend tool to out"""
out.flush()
subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
def alter_doc(backend):
"""Alter the documentation for backend"""

View file

@ -15,6 +15,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"slices"
"sort"
"strconv"
"strings"
@ -207,15 +208,16 @@ type bisyncTest struct {
parent1 fs.Fs
parent2 fs.Fs
// global flags
argRemote1 string
argRemote2 string
noCompare bool
noCleanup bool
golden bool
debug bool
stopAt int
TestFn bisync.TestFunc
ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
argRemote1 string
argRemote2 string
noCompare bool
noCleanup bool
golden bool
debug bool
stopAt int
TestFn bisync.TestFunc
ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank
}
var color = bisync.Color
@ -946,6 +948,10 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") {
b.t.Skip("skipping test as remote does not support empty dirs")
}
ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank
if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) {
b.ignoreBlankHash = true
}
if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported {
if b.testCase != "nomodtime" {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
@ -1551,6 +1557,12 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) {
logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe)
}
if b.ignoreBlankHash {
logReplacements = append(logReplacements,
`^.*hash is missing.*$`, dropMe,
`^.*not equal on recheck.*$`, dropMe,
)
}
rep := logReplacements
if b.testCase == "dry_run" {
rep = append(rep, dryrunReplacements...)

View file

@ -20,6 +20,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/spf13/cobra"
@ -193,7 +194,7 @@ var commandDefinition = &cobra.Command{
cmd.Run(false, true, command, func() error {
err := Bisync(ctx, fs1, fs2, &opt)
if err == ErrBisyncAborted {
os.Exit(2)
return fserrors.FatalError(err)
}
return err
})

View file

@ -10,6 +10,7 @@ import (
"io"
"os"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@ -21,7 +22,6 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"golang.org/x/exp/slices"
)
// ListingHeader defines first line of a listing

View file

@ -23,7 +23,7 @@ import (
"github.com/rclone/rclone/lib/terminal"
)
// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code
var ErrBisyncAborted = errors.New("bisync aborted")
// bisyncRun keeps bisync runtime state

View file

@ -50,7 +50,6 @@ var (
version bool
// Errors
errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error")
errorNotEnoughArguments = errors.New("not enough arguments")
errorTooManyArguments = errors.New("too many arguments")
)
@ -495,8 +494,6 @@ func resolveExitCode(err error) {
os.Exit(exitcode.DirNotFound)
case errors.Is(err, fs.ErrorObjectNotFound):
os.Exit(exitcode.FileNotFound)
case errors.Is(err, errorUncategorized):
os.Exit(exitcode.UncategorizedError)
case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
os.Exit(exitcode.TransferExceeded)
case errors.Is(err, fssync.ErrorMaxDurationReached):
@ -507,8 +504,10 @@ func resolveExitCode(err error) {
os.Exit(exitcode.NoRetryError)
case fserrors.IsFatalError(err):
os.Exit(exitcode.FatalError)
default:
case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments):
os.Exit(exitcode.UsageError)
default:
os.Exit(exitcode.UncategorizedError)
}
}
@ -536,6 +535,7 @@ func Main() {
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
}
fs.Fatalf(nil, "Fatal error: %v", err)
fs.Logf(nil, "Fatal error: %v", err)
os.Exit(exitcode.UsageError)
}
}

View file

@ -42,7 +42,9 @@ When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
# OS X
#... or on some systems
fusermount3 -u /path/to/local/mount
# OS X or Linux when using nfsmount
umount /path/to/local/mount
The umount operation can fail, for example when the mountpoint is busy.
@ -386,9 +388,9 @@ Note that systemd runs mount units without any environment variables including
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
and you should provide `--config` and `--cache-dir` explicitly as absolute
paths via rclone arguments.
Since mounting requires the `fusermount` program, rclone will use the fallback
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
is present on this PATH.
Since mounting requires the `fusermount` or `fusermount3` program,
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
### Rclone as Unix mount helper

View file

@ -251,6 +251,15 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
case "vfs-used-is-size":
vfsOpt.UsedIsSize, err = opt.GetBool(key)
case "vfs-read-chunk-streams":
intVal, err = opt.GetInt64(key)
if err == nil {
if intVal >= 0 && intVal <= math.MaxInt {
vfsOpt.ChunkStreams = int(intVal)
} else {
err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
}
}
// unprefixed vfs options
case "no-modtime":

View file

@ -174,7 +174,7 @@ func TestCmdTest(t *testing.T) {
// Test error and error output
out, err = rclone("version", "--provoke-an-error")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "exit status 1")
assert.Contains(t, err.Error(), "exit status 2")
assert.Contains(t, out, "Error: unknown flag")
}

View file

@ -6,7 +6,9 @@ package cmdtest
import (
"os"
"regexp"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@ -344,4 +346,42 @@ func TestEnvironmentVariables(t *testing.T) {
env = ""
out, err = rcloneEnv(env, "version", "-vv", "--use-json-log")
jsonLogOK()
// Find all the File filter lines in out and return them
parseFileFilters := func(out string) (extensions []string) {
// Match: - (^|/)[^/]*\.jpg$
find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`)
for _, line := range strings.Split(out, "\n") {
if m := find.FindStringSubmatch(line); m != nil {
extensions = append(extensions, m[1])
}
}
return extensions
}
// Make sure that multiple valued (stringArray) environment variables are handled properly
env = ``
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
require.NoError(t, err)
assert.Equal(t, []string{"gif", "tif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE=*.jpg`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "gif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE=*.jpg,*.png`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "png", "gif", "tif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE="*.jpg","*.png"`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "png"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE="*.,,,","*.png"`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
require.NoError(t, err)
assert.Equal(t, []string{",,,", "png"}, parseFileFilters(out))
}

View file

@ -889,3 +889,9 @@ put them back in again.` >}}
* Mathieu Moreau <mrx23dot@users.noreply.github.com>
* fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
* Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
* Divyam <47589864+divyam234@users.noreply.github.com>
* ttionya <ttionya@users.noreply.github.com>
* quiescens <quiescens@gmail.com>
* rishi.sridhar <rishi.sridhar@zohocorp.com>
* Lawrence Murray <lawrence@indii.org>
* Leandro Piccilli <leandro.piccilli@thalesgroup.com>

View file

@ -180,6 +180,13 @@ If the resource has multiple user-assigned identities you will need to
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
section](#use_msi).
If you are operating in disconnected clouds, or private clouds such as
Azure Stack, you may want to set `disable_instance_discovery = true`.
This determines whether rclone requests Microsoft Entra instance
metadata from `https://login.microsoft.com/` before authenticating.
Setting this to `true` will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
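As an illustrative example only (the remote name and account are placeholders, assuming the Azure Blob backend), such an environment might be configured like this:

    [azstack]
    type = azureblob
    account = myaccount
    env_auth = true
    disable_instance_discovery = true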
##### Env Auth: 3. Azure CLI credentials (as used by the az tool)
Credentials created with the `az` tool can be picked up using `env_auth`.
@ -290,6 +297,16 @@ be explicitly specified using exactly one of the `msi_object_id`,
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
set, this is is equivalent to using `env_auth`.
#### Azure CLI tool `az` {#use_az}
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the `az` CLI on a host that
has a System Managed Identity which you do not want rclone to use.
Don't set `env_auth` at the same time.
#### Anonymous {#anonymous}
If you want to access resources with public anonymous access then set

View file

@ -968,12 +968,15 @@ that while concurrent bisync runs are allowed, _be very cautious_
that there is no overlap in the trees being synched between concurrent runs,
lest there be replicated files, deleted files and general mayhem.
### Return codes
### Exit codes
`rclone bisync` returns the following codes to the calling program:
- `0` on a successful run,
- `1` for a non-critical failing run (a rerun may be successful),
- `2` for a critically aborted run (requires a `--resync` to recover).
- `2` on syntax or usage error,
- `7` for a critically aborted run (requires a `--resync` to recover).
See also the section about [exit codes](/docs/#exit-code) in main docs.
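For illustration (paths and remote names are placeholders), a calling script might react to these codes like so:

    rclone bisync remote1:path remote2:path
    rc=$?
    if [ "$rc" -eq 7 ]; then
        # critically aborted - recover with a manual --resync before the next run
        rclone bisync remote1:path remote2:path --resync
    elif [ "$rc" -ne 0 ]; then
        echo "bisync finished with exit code $rc - check the log before rerunning" >&2
    fi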
### Graceful Shutdown

View file

@ -5,6 +5,25 @@ description: "Rclone Changelog"
# Changelog
## v1.68.1 - 2024-09-24
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
* Bug Fixes
* build: Fix docker release build (ttionya)
* doc fixes (Nick Craig-Wood, Pawel Palucha)
* fs
* Fix `--dump filters` not always appearing (Nick Craig-Wood)
* Fix setting `stringArray` config values from environment variables (Nick Craig-Wood)
* rc: Fix default value of `--metrics-addr` (Nick Craig-Wood)
* serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam)
* Onedrive
* Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)
* Pikpak
* Fix login issue where token retrieval fails (wiserain)
* S3
* Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood)
## v1.68.0 - 2024-09-08
[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0)

View file

@ -2868,9 +2868,9 @@ messages may not be valid after the retry. If rclone has done a retry
it will log a high priority message if the retry was successful.
### List of exit codes ###
* `0` - success
* `1` - Syntax or usage error
* `2` - Error not otherwise categorised
* `0` - Success
* `1` - Error not otherwise categorised
* `2` - Syntax or usage error
* `3` - Directory not found
* `4` - File not found
* `5` - Temporary error (one that more retries might fix) (Retry errors)
@ -2911,6 +2911,22 @@ so they take exactly the same form.
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
Options that can appear multiple times (type `stringArray`) are
treated slightly differently as environment variables can only be
defined once. In order to allow a simple mechanism for adding one or
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
string. For example
| Environment Variable | Equivalent options |
|----------------------|--------------------|
| `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
| `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
| `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
| `RCLONE_EXCLUDE='"/directory with comma , in it /**"'` | `--exclude "/directory with comma , in it /**" |
If `stringArray` options are defined as environment variables **and**
options on the command line then all the values will be used.
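For example (with a hypothetical remote name), the following run excludes `*.jpg`, `*.png` and `*.gif` - the environment variable values and the command line value are combined:

    RCLONE_EXCLUDE="*.jpg,*.png" rclone lsf remote: --exclude "*.gif"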
### Config file ###
You can set defaults for values in the config file on an individual

View file

@ -363,6 +363,20 @@ Properties:
- Type: string
- Required: false
#### --gcs-access-token
Short-lived access token.
Leave blank normally.
Needed only if you want to use short-lived access tokens instead of interactive login.
Properties:
- Config: access_token
- Env Var: RCLONE_GCS_ACCESS_TOKEN
- Type: string
- Required: false
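For example, one possible way (not the only one) to supply a short-lived token obtained from the Google Cloud CLI, assuming a remote named `gcs-remote`:

    RCLONE_GCS_ACCESS_TOKEN="$(gcloud auth print-access-token)" rclone lsd gcs-remote: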
#### --gcs-anonymous
Access public buckets and objects without credentials.

View file

@ -502,12 +502,18 @@ is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115)
**The current google API does not allow photos to be downloaded at original resolution. This is very important if you are, for example, relying on "Google Photos" as a backup of your photos. You will not be able to use rclone to redownload original images. You could use 'google takeout' to recover the original photos as a last resort**
**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.
### Downloading Videos
When videos are downloaded they are downloaded in a really compressed
version of the video compared to downloading it via the Google Photos
web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).
**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.
### Duplicates
If a file name is duplicated in a directory then rclone will add the

View file

@ -521,7 +521,7 @@ upon backend-specific capabilities.
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
| Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |

View file

@ -401,6 +401,38 @@ there for more details.
Setting this flag increases the chance for undetected upload failures.
### Increasing performance
#### Using server-side copy
If you are copying objects between S3 buckets in the same region, you should
use server-side copy.
This is much faster than downloading and re-uploading the objects, as no data is transferred.
For rclone to use server-side copy, you must use the same remote for the source and destination.
rclone copy s3:source-bucket s3:destination-bucket
When using server-side copy, the performance is limited by the rate at which rclone issues
API requests to S3.
See below for how to increase the number of API requests rclone makes.
#### Increasing the rate of API requests
You can increase the rate of API requests to S3 by increasing the parallelism using the `--transfers` and `--checkers`
options.
Rclone uses very conservative defaults for these settings, as not all providers support high request rates.
Depending on your provider, you can significantly increase the number of transfers and checkers.
For example, with AWS S3 you can increase the number of checkers to values like 200.
If you are doing a server-side copy, you can also increase the number of transfers to 200.
rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
You will need to experiment with these values to find the optimal settings for your setup.
### Versions
When bucket versioning is enabled (this can be done with rclone with
@ -2262,6 +2294,21 @@ You can also do this entirely on the command line
This is the provider used as the main example and described in the [configuration](#configuration) section above.
### AWS Directory Buckets
From rclone v1.69 [Directory Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
are supported.
You will need to set the `directory_bucket = true` config parameter
or use `--s3-directory-bucket`.
Note that rclone cannot yet:
- Create directory buckets
- List directory buckets
See [the --s3-directory-bucket flag](#s3-directory-bucket) for more info
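A minimal illustrative configuration for such a remote (the remote name is a placeholder) might look like:

    [dirbucket]
    type = s3
    provider = AWS
    directory_bucket = true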
### AWS Snowball Edge
[AWS Snowball](https://aws.amazon.com/snowball/) is a hardware

View file

@ -61,3 +61,4 @@ Thank you very much to our sponsors:
{{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}

View file

@ -224,6 +224,17 @@ Properties:
- Type: string
- Required: false
#### --zoho-upload-cutoff
Cutoff for switching to large file upload API (>= 10 MiB).
Properties:
- Config: upload_cutoff
- Env Var: RCLONE_ZOHO_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 10Mi
#### --zoho-encoding
The encoding for the backend.

View file

@ -1 +1 @@
v1.68.0
v1.69.0

View file

@ -35,12 +35,13 @@ func (tb *tokenBucket) startSignalHandler() {
tb.toggledOff = !tb.toggledOff
tb.curr, tb.prev = tb.prev, tb.curr
s := "disabled"
s, limit := "disabled", "off"
if !tb.curr._isOff() {
s = "enabled"
limit = tb.currLimit.Bandwidth.String()
}
fs.Logf(nil, "Bandwidth limit %s by user", s)
fs.Logf(nil, "Bandwidth limit %s by user (now %s)", s, limit)
}()
}
}()

View file

@ -41,7 +41,12 @@ type tokenBucket struct {
//
// Call with lock held
func (bs *buckets) _isOff() bool { //nolint:unused // Don't include unused when running golangci-lint in case its on windows where this is not called
return bs[0] == nil
for i := range bs {
if bs[i] != nil {
return false
}
}
return true
}
// Disable the limits

fs/cache/cache.go vendored
View file

@ -4,6 +4,7 @@ package cache
import (
"context"
"runtime"
"strings"
"sync"
"github.com/rclone/rclone/fs"
@ -12,10 +13,11 @@ import (
)
var (
once sync.Once // creation
c *cache.Cache
mu sync.Mutex // mutex to protect remap
remap = map[string]string{} // map user supplied names to canonical names
once sync.Once // creation
c *cache.Cache
mu sync.Mutex // mutex to protect remap
remap = map[string]string{} // map user supplied names to canonical names - [fsString]canonicalName
childParentMap = map[string]string{} // tracks a one-to-many relationship between parent dirs and their direct children files - [child]parent
)
// Create the cache just once
@ -57,6 +59,39 @@ func addMapping(fsString, canonicalName string) {
mu.Unlock()
}
// addChild tracks known file (child) to directory (parent) relationships.
// Note that the canonicalName of a child will always equal that of its parent,
// but not everything with an equal canonicalName is a child.
// It could be an alias or overridden version of a directory.
func addChild(child, parent string) {
if child == parent {
return
}
mu.Lock()
childParentMap[child] = parent
mu.Unlock()
}
// returns true if name is definitely known to be a child (i.e. a file, not a dir).
// returns false if name is a dir or if we don't know.
func isChild(child string) bool {
mu.Lock()
_, found := childParentMap[child]
mu.Unlock()
return found
}
// ensures that we return fs.ErrorIsFile when necessary
func getError(fsString string, err error) error {
if err != nil && err != fs.ErrorIsFile {
return err
}
if isChild(fsString) {
return fs.ErrorIsFile
}
return nil
}
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
@ -69,31 +104,39 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
created = ok
return f, ok, err
})
f, ok := value.(fs.Fs)
if err != nil && err != fs.ErrorIsFile {
if ok {
return f, err // for possible future uses of PutErr
}
return nil, err
}
f = value.(fs.Fs)
// Check we stored the Fs at the canonical name
if created {
canonicalName := fs.ConfigString(f)
if canonicalName != canonicalFsString {
// Note that if err == fs.ErrorIsFile at this moment
// then we can't rename the remote as it will have the
// wrong error status, we need to add a new one.
if err == nil {
if err == nil { // it's a dir
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
value, found := c.Rename(canonicalFsString, canonicalName)
if found {
f = value.(fs.Fs)
}
addMapping(canonicalFsString, canonicalName)
} else {
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
Put(canonicalName, f)
} else { // it's a file
// the fs we cache is always the file's parent, never the file,
// but we use the childParentMap to return the correct error status based on the fsString passed in.
fs.Debugf(nil, "fs cache: renaming child cache item %q to be canonical for parent %q", canonicalFsString, canonicalName)
value, found := c.Rename(canonicalFsString, canonicalName) // rename the file entry to parent
if found {
f = value.(fs.Fs) // if parent already exists, use it
}
Put(canonicalName, f) // force err == nil for the cache
addMapping(canonicalFsString, canonicalName) // note the fsString-canonicalName connection for future lookups
addChild(fsString, canonicalName) // note the file-directory connection for future lookups
}
}
}
return f, err
return f, getError(fsString, err) // ensure fs.ErrorIsFile is returned when necessary
}
// Pin f into the cache until Unpin is called
@ -111,7 +154,6 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
runtime.SetFinalizer(x, func(_ interface{}) {
Unpin(f)
})
}
// Unpin f from the cache
@ -174,6 +216,9 @@ func PutErr(fsString string, f fs.Fs, err error) {
canonicalName := fs.ConfigString(f)
c.PutErr(canonicalName, f, err)
addMapping(fsString, canonicalName)
if err == fs.ErrorIsFile {
addChild(fsString, canonicalName)
}
}
// Put puts an fs.Fs named fsString into the cache
@ -186,6 +231,7 @@ func Put(fsString string, f fs.Fs) {
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
createOnFirstUse()
ClearMappingsPrefix(name)
return c.DeletePrefix(name + ":")
}
@ -193,6 +239,7 @@ func ClearConfig(name string) (deleted int) {
func Clear() {
createOnFirstUse()
c.Clear()
ClearMappings()
}
// Entries returns the number of entries in the cache
@ -200,3 +247,39 @@ func Entries() int {
createOnFirstUse()
return c.Entries()
}
// ClearMappings removes everything from remap and childParentMap
func ClearMappings() {
mu.Lock()
defer mu.Unlock()
remap = map[string]string{}
childParentMap = map[string]string{}
}
// ClearMappingsPrefix deletes all mappings to parents with given prefix
//
// Returns number of entries deleted
func ClearMappingsPrefix(prefix string) (deleted int) {
mu.Lock()
do := func(mapping map[string]string) {
for key, val := range mapping {
if !strings.HasPrefix(val, prefix) {
continue
}
delete(mapping, key)
deleted++
}
}
do(remap)
do(childParentMap)
mu.Unlock()
return deleted
}
// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
//
// Each entry is counted only once, regardless of entry.pinCount
func EntriesWithPinCount() (pinned, unpinned int) {
createOnFirstUse()
return c.EntriesWithPinCount()
}


@ -24,7 +24,7 @@ func mockNewFs(t *testing.T) func(ctx context.Context, path string) (fs.Fs, erro
switch path {
case "mock:/":
return mockfs.NewFs(ctx, "mock", "/", nil)
case "mock:/file.txt", "mock:file.txt":
case "mock:/file.txt", "mock:file.txt", "mock:/file2.txt", "mock:file2.txt":
fMock, err := mockfs.NewFs(ctx, "mock", "/", nil)
require.NoError(t, err)
return fMock, fs.ErrorIsFile
@ -55,6 +55,7 @@ func TestGet(t *testing.T) {
}
func TestGetFile(t *testing.T) {
defer ClearMappings()
create := mockNewFs(t)
assert.Equal(t, 0, Entries())
@ -63,7 +64,7 @@ func TestGetFile(t *testing.T) {
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 1, Entries())
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@ -71,7 +72,7 @@ func TestGetFile(t *testing.T) {
assert.Equal(t, f, f2)
// check parent is there too
// check it is also found when referred to by parent name
f2, err = GetFn(context.Background(), "mock:/", create)
require.Nil(t, err)
require.NotNil(t, f2)
@ -80,6 +81,7 @@ func TestGetFile(t *testing.T) {
}
func TestGetFile2(t *testing.T) {
defer ClearMappings()
create := mockNewFs(t)
assert.Equal(t, 0, Entries())
@ -88,7 +90,7 @@ func TestGetFile2(t *testing.T) {
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 1, Entries())
f2, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@ -96,7 +98,7 @@ func TestGetFile2(t *testing.T) {
assert.Equal(t, f, f2)
// check parent is there too
// check it is also found when referred to by parent name
f2, err = GetFn(context.Background(), "mock:/", create)
require.Nil(t, err)
require.NotNil(t, f2)
@ -124,22 +126,22 @@ func TestPutErr(t *testing.T) {
assert.Equal(t, 0, Entries())
PutErr("mock:file.txt", f, fs.ErrorIsFile)
PutErr("mock:/", f, fs.ErrorNotFoundInConfigFile)
assert.Equal(t, 1, Entries())
fNew, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
fNew, err := GetFn(context.Background(), "mock:/", create)
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
// Check canonicalisation
PutErr("mock:/file.txt", f, fs.ErrorIsFile)
PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)
fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
@ -190,6 +192,75 @@ func TestPin(t *testing.T) {
Unpin(f2)
}
func TestPinFile(t *testing.T) {
defer ClearMappings()
create := mockNewFs(t)
// Test pinning and unpinning nonexistent
f, err := mockfs.NewFs(context.Background(), "mock", "/file.txt", nil)
require.NoError(t, err)
Pin(f)
Unpin(f)
// Now test pinning an existing
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 1, len(childParentMap))
Pin(f2)
assert.Equal(t, 1, Entries())
pinned, unpinned := EntriesWithPinCount()
assert.Equal(t, 1, pinned)
assert.Equal(t, 0, unpinned)
Unpin(f2)
assert.Equal(t, 1, Entries())
pinned, unpinned = EntriesWithPinCount()
assert.Equal(t, 0, pinned)
assert.Equal(t, 1, unpinned)
// try a different child of the same parent, and parent
// should not add additional cache items
called = 0 // this one does create() because we haven't seen it before and don't yet know it's a file
f3, err := GetFn(context.Background(), "mock:/file2.txt", create)
assert.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 2, len(childParentMap))
parent, err := GetFn(context.Background(), "mock:/", create)
assert.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 2, len(childParentMap))
Pin(f3)
assert.Equal(t, 1, Entries())
pinned, unpinned = EntriesWithPinCount()
assert.Equal(t, 1, pinned)
assert.Equal(t, 0, unpinned)
Unpin(f3)
assert.Equal(t, 1, Entries())
pinned, unpinned = EntriesWithPinCount()
assert.Equal(t, 0, pinned)
assert.Equal(t, 1, unpinned)
Pin(parent)
assert.Equal(t, 1, Entries())
pinned, unpinned = EntriesWithPinCount()
assert.Equal(t, 1, pinned)
assert.Equal(t, 0, unpinned)
Unpin(parent)
assert.Equal(t, 1, Entries())
pinned, unpinned = EntriesWithPinCount()
assert.Equal(t, 0, pinned)
assert.Equal(t, 1, unpinned)
// all 3 should have equal configstrings
assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(f3))
assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(parent))
}
func TestClearConfig(t *testing.T) {
create := mockNewFs(t)
@ -198,9 +269,9 @@ func TestClearConfig(t *testing.T) {
_, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 2, Entries()) // file + parent
assert.Equal(t, 1, Entries())
assert.Equal(t, 2, ClearConfig("mock"))
assert.Equal(t, 1, ClearConfig("mock"))
assert.Equal(t, 0, Entries())
}


@ -2,7 +2,7 @@
package configstruct
import (
"encoding/json"
"encoding/csv"
"errors"
"fmt"
"reflect"
@ -31,7 +31,7 @@ func camelToSnake(in string) string {
//
// Builtin types are expected to be encoded as their natural
// stringifications as produced by fmt.Sprint except for []string which
// is expected to be encoded as JSON with empty array encoded as "".
// is expected to be encoded as CSV with empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
@ -58,14 +58,18 @@ func StringToInterface(def interface{}, in string) (newValue interface{}, err er
case time.Duration:
newValue, err = time.ParseDuration(in)
case []string:
// JSON decode arrays of strings
if in != "" {
var out []string
err = json.Unmarshal([]byte(in), &out)
newValue = out
} else {
// Empty string we will treat as empty array
// CSV decode arrays of strings - ideally we would use
// fs.CommaSepList here but we can't as it would cause
// a circular import.
if len(in) == 0 {
newValue = []string{}
} else {
r := csv.NewReader(strings.NewReader(in))
newValue, err = r.Read()
switch _err := err.(type) {
case *csv.ParseError:
err = _err.Err // remove line numbers from the error message
}
}
default:
// Try using a Set method
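For reference, the single-record CSV decoding this branch relies on behaves as follows; a standalone sketch using only the standard library:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	for _, in := range []string{"hello", "hello,world!", `"hello","world!"`, `""`} {
		r := csv.NewReader(strings.NewReader(in))
		record, err := r.Read()
		if perr, ok := err.(*csv.ParseError); ok {
			err = perr.Err // strip the line/column prefix, as the case above does
		}
		fmt.Printf("%s -> %q (%v)\n", in, record, err)
	}
	// hello -> ["hello"] (<nil>)
	// hello,world! -> ["hello" "world!"] (<nil>)
	// "hello","world!" -> ["hello" "world!"] (<nil>)
	// "" -> [""] (<nil>)
}
```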


@ -204,9 +204,11 @@ func TestStringToInterface(t *testing.T) {
{"1m1s", fs.Duration(0), fs.Duration(61 * time.Second), ""},
{"1potato", fs.Duration(0), nil, `parsing "1potato" as fs.Duration failed: parsing time "1potato" as "2006-01-02": cannot parse "1potato" as "2006"`},
{``, []string{}, []string{}, ""},
{`[]`, []string(nil), []string{}, ""},
{`["hello"]`, []string{}, []string{"hello"}, ""},
{`["hello","world!"]`, []string(nil), []string{"hello", "world!"}, ""},
{`""`, []string(nil), []string{""}, ""},
{`hello`, []string{}, []string{"hello"}, ""},
{`"hello"`, []string{}, []string{"hello"}, ""},
{`hello,world!`, []string(nil), []string{"hello", "world!"}, ""},
{`"hello","world!"`, []string(nil), []string{"hello", "world!"}, ""},
{"1s", time.Duration(0), time.Second, ""},
{"1m1s", time.Duration(0), 61 * time.Second, ""},
{"1potato", time.Duration(0), nil, `parsing "1potato" as time.Duration failed: time: unknown unit "potato" in duration "1potato"`},


@ -143,12 +143,32 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) {
// Read default from environment if possible
envKey := fs.OptionToEnv(name)
if envValue, envFound := os.LookupEnv(envKey); envFound {
err := flags.Set(name, envValue)
if err != nil {
fs.Fatalf(nil, "Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
isStringArray := false
opt, isOption := flag.Value.(*fs.Option)
if isOption {
_, isStringArray = opt.Default.([]string)
}
if isStringArray {
// Treat stringArray differently, parsing the environment variable as a CSV list
var list fs.CommaSepList
err := list.Set(envValue)
if err != nil {
fs.Fatalf(nil, "Invalid value when setting stringArray --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
}
// Set both the Value (so items on the command line get added) and DefValue so the help is correct
opt.Value = ([]string)(list)
flag.DefValue = list.String()
for _, v := range list {
fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, v, envKey, envValue)
}
} else {
err := flags.Set(name, envValue)
if err != nil {
fs.Fatalf(nil, "Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
}
fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue)
flag.DefValue = envValue
}
fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue)
flag.DefValue = envValue
}
// Add flag to Group if it is a global flag
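The practical effect is that a stringArray flag can now be seeded from a single environment variable holding a CSV list. A minimal sketch of the parsing step, using fs.CommaSepList exactly as the branch above does; the variable name and value are made up for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/rclone/rclone/fs"
)

func main() {
	// Hypothetical value: two listen addresses for a stringArray option in one variable.
	os.Setenv("RCLONE_METRICS_ADDR", ":9090,:9091")

	var list fs.CommaSepList
	if err := list.Set(os.Getenv("RCLONE_METRICS_ADDR")); err != nil {
		fmt.Println("invalid CSV in environment variable:", err)
		return
	}
	// Each CSV element becomes one value of the stringArray flag.
	fmt.Println([]string(list)) // [:9090 :9091]
}
```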


@ -22,7 +22,7 @@ func mockNewFs(t *testing.T) func() {
require.NoError(t, err)
cache.Put("mock:/", f)
cache.Put(":mock:/", f)
f, err = mockfs.NewFs(ctx, "mock", "dir/file.txt", nil)
f, err = mockfs.NewFs(ctx, "mock", "dir/", nil)
require.NoError(t, err)
cache.PutErr("mock:dir/file.txt", f, fs.ErrorIsFile)
return func() {


@ -85,7 +85,7 @@ var OptionsInfo = fs.Options{{
Groups: "RC",
}, {
Name: "metrics_addr",
Default: []string{""},
Default: []string{},
Help: "IPaddress:Port or :Port to bind metrics server to",
Groups: "Metrics",
}}.
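Changing the default from []string{""} to an empty slice means callers can no longer index element 0 unconditionally; the length has to be checked first, which is what the MetricsStart hunk below does. A trivial sketch of the difference:

```go
package main

import "fmt"

func main() {
	// Hypothetical option value: the default is now an empty slice, not []string{""}.
	var listenAddr []string

	// Old guard: listenAddr[0] != "" would panic with an index out of range here.
	// New guard: test the length before touching any element.
	if len(listenAddr) > 0 {
		fmt.Println("serve metrics on", listenAddr[0])
	} else {
		fmt.Println("metrics server not configured")
	}
}
```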


@ -37,7 +37,7 @@ func init() {
// If the server wasn't configured the *Server returned may be nil
func MetricsStart(ctx context.Context, opt *rc.Options) (*MetricsServer, error) {
jobs.SetOpt(opt) // set the defaults for jobs
if opt.MetricsHTTP.ListenAddr[0] != "" {
if len(opt.MetricsHTTP.ListenAddr) > 0 {
// Serve on the DefaultServeMux so can have global registrations appear
s, err := newMetricsServer(ctx, opt)
if err != nil {


@ -264,14 +264,9 @@ func (o *Option) String() string {
if len(stringArray) == 0 {
return ""
}
// Encode string arrays as JSON
// Encode string arrays as CSV
// The default Go encoding can't be decoded uniquely
buf, err := json.Marshal(stringArray)
if err != nil {
Errorf(nil, "Can't encode default value for %q key - ignoring: %v", o.Name, err)
return "[]"
}
return string(buf)
return CommaSepList(stringArray).String()
}
return fmt.Sprint(v)
}
@ -531,7 +526,22 @@ func (oi *OptionsInfo) load() error {
// their values read from the options, environment variables and
// command line parameters.
func GlobalOptionsInit() error {
for _, opt := range OptionsRegistry {
var keys []string
for key := range OptionsRegistry {
keys = append(keys, key)
}
sort.Slice(keys, func(i, j int) bool {
// Sort alphabetically, but with "main" first
if keys[i] == "main" {
return true
}
if keys[j] == "main" {
return false
}
return keys[i] < keys[j]
})
for _, key := range keys {
opt := OptionsRegistry[key]
err := opt.load()
if err != nil {
return err


@ -597,6 +597,108 @@ func TestServerSideCopy(t *testing.T) {
fstest.CheckItems(t, FremoteCopy, file1)
}
// Test copying a file over itself
func TestCopyOverSelf(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
r.CheckRemoteItems(t, file1)
file2 := r.WriteFile("sub dir/hello world", "hello world again", t2)
r.CheckLocalItems(t, file2)
ctx = predictDstFromLogger(ctx)
err := CopyDir(ctx, r.Fremote, r.Flocal, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
r.CheckRemoteItems(t, file2)
}
// Test server-side copying a file over itself
func TestServerSideCopyOverSelf(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
r.CheckRemoteItems(t, file1)
FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote()
require.NoError(t, err)
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file1)
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
r.CheckRemoteItems(t, file2)
ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file2)
}
// Test moving a file over itself
func TestMoveOverSelf(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
r.CheckRemoteItems(t, file1)
file2 := r.WriteFile("sub dir/hello world", "hello world again", t2)
r.CheckLocalItems(t, file2)
ctx = predictDstFromLogger(ctx)
err := MoveDir(ctx, r.Fremote, r.Flocal, false, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
}
// Test server-side moving a file over itself
func TestServerSideMoveOverSelf(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
r.CheckRemoteItems(t, file1)
FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote()
require.NoError(t, err)
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
ctx = predictDstFromLogger(ctx)
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
fstest.CheckItems(t, FremoteCopy, file1)
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
r.CheckRemoteItems(t, file2)
// ctx = predictDstFromLogger(ctx)
err = MoveDir(ctx, FremoteCopy, r.Fremote, false, false)
require.NoError(t, err)
// testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // not currently supported
r.CheckRemoteItems(t)
fstest.CheckItems(t, FremoteCopy, file2)
// check that individual file moves also work without MoveDir
file3 := r.WriteObject(ctx, "sub dir/hello world", "hello world a third time", t3)
r.CheckRemoteItems(t, file3)
ctx = predictDstFromLogger(ctx)
fs.Debugf(nil, "testing file moves")
err = moveDir(ctx, FremoteCopy, r.Fremote, false, false)
require.NoError(t, err)
testLoggerVsLsf(ctx, FremoteCopy, operations.GetLoggerOpt(ctx).JSON, t)
r.CheckRemoteItems(t)
fstest.CheckItems(t, FremoteCopy, file3)
}
// Check that if the local file doesn't exist when we copy it up,
// nothing happens to the remote file
func TestCopyAfterDelete(t *testing.T) {
@ -2320,15 +2422,19 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
r.CheckRemoteItems(t, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) {
testSyncBackupDir(t, "backup", "", false)
}
func TestSyncBackupDirWithSuffix(t *testing.T) {
testSyncBackupDir(t, "backup", ".bak", false)
}
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
testSyncBackupDir(t, "backup", "-2019-01-01", true)
}
func TestSyncBackupDirSuffixOnly(t *testing.T) {
testSyncBackupDir(t, "", ".bak", false)
}
@ -2806,7 +2912,7 @@ func predictDstFromLogger(ctx context.Context) context.Context {
}
func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer {
var opt = operations.ListJSONOpt{
opt := operations.ListJSONOpt{
NoModTime: false,
NoMimeType: true,
DirsOnly: false,


@ -1,4 +1,4 @@
package fs
// VersionTag of rclone
var VersionTag = "v1.68.0"
var VersionTag = "v1.69.0"


@ -6,6 +6,7 @@ import (
"fmt"
"os"
"path"
"slices"
"github.com/rclone/rclone/fs"
yaml "gopkg.in/yaml.v2"
@ -35,6 +36,7 @@ type Backend struct {
CleanUp bool // when running clean, run cleanup first
Ignore []string // test names to ignore the failure of
Tests []string // paths of tests to run, blank for all
IgnoreTests []string // paths of tests not to run, blank for none
ListRetries int // -list-retries if > 0
ExtraTime float64 // factor to multiply the timeout by
}
@ -42,15 +44,15 @@ type Backend struct {
// includeTest returns true if this backend should be included in this
// test
func (b *Backend) includeTest(t *Test) bool {
// Is this test ignored
if slices.Contains(b.IgnoreTests, t.Path) {
return false
}
// An empty b.Tests implies running all of them except the ignored ones
if len(b.Tests) == 0 {
return true
}
for _, testPath := range b.Tests {
if testPath == t.Path {
return true
}
}
return false
return slices.Contains(b.Tests, t.Path)
}
// MakeRuns creates Run objects the Backend and Test


@ -395,6 +395,10 @@ backends:
- backend: "cache"
remote: "TestCache:"
fastlist: false
ignoretests:
- TestBisyncLocalRemote
- TestBisyncRemoteLocal
- TestBisyncRemoteRemote
- backend: "mega"
remote: "TestMega:"
fastlist: false

2 go.mod

@ -79,7 +79,6 @@ require (
go.etcd.io/bbolt v1.3.10
goftp.io/server/v2 v2.0.1
golang.org/x/crypto v0.25.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/net v0.27.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.8.0
@ -205,6 +204,7 @@ require (
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/tools v0.23.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect

16 lib/cache/cache.go vendored

@ -260,3 +260,19 @@ func (c *Cache) SetFinalizer(finalize func(interface{})) {
c.finalize = finalize
c.mu.Unlock()
}
// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
//
// Each entry is counted only once, regardless of entry.pinCount
func (c *Cache) EntriesWithPinCount() (pinned, unpinned int) {
c.mu.Lock()
for _, entry := range c.cache {
if entry.pinCount <= 0 {
unpinned++
} else {
pinned++
}
}
c.mu.Unlock()
return pinned, unpinned
}


@ -4,10 +4,10 @@ package exitcode
const (
// Success is returned when rclone finished without error.
Success = iota
// UsageError is returned when there was a syntax or usage error in the arguments.
UsageError
// UncategorizedError is returned for any error not categorised otherwise.
UncategorizedError
// UsageError is returned when there was a syntax or usage error in the arguments.
UsageError
// DirNotFound is returned when a source or destination directory is not found.
DirNotFound
// FileNotFound is returned when a source or destination file is not found.


@ -40,7 +40,7 @@ type Dir struct {
modTimeMu sync.Mutex // protects the following
modTime time.Time
_hasVirtual atomic.Bool // shows if the directory has virtual entries
_virtuals atomic.Int32 // number of virtual directory entries in this directory and children
}
//go:generate stringer -type=vState
@ -67,7 +67,6 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
items: make(map[string]Node),
}
d.cleanupTimer = time.AfterFunc(time.Duration(vfs.Opt.DirCacheTime*2), d.cacheCleanup)
d.setHasVirtual(false)
return d
}
@ -198,58 +197,60 @@ func (d *Dir) Node() Node {
return d
}
// hasVirtual returns whether the directory has virtual entries
// hasVirtual returns whether the directory or any of its children have virtual entries
func (d *Dir) hasVirtual() bool {
return d._hasVirtual.Load()
return d._virtuals.Load() != 0
}
// setHasVirtual sets the hasVirtual flag for the directory
func (d *Dir) setHasVirtual(hasVirtual bool) {
d._hasVirtual.Store(hasVirtual)
// addVirtual adds n virtual items to this directory and all of its parents
func (d *Dir) addVirtual(n int32) {
for d != nil {
d._virtuals.Add(n)
d = d.parent
}
}
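Because every increment and decrement walks the parent chain, a directory's count also reflects virtual entries anywhere below it, which is what ForgetAll relies on. A self-contained sketch of the pattern with a simplified stand-in for Dir (not the real vfs type):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// node is a stripped-down stand-in for vfs.Dir, illustrating how a
// per-directory virtual-entry count propagates to every ancestor.
type node struct {
	parent   *node
	virtuals atomic.Int32
}

func (n *node) addVirtual(delta int32) {
	for d := n; d != nil; d = d.parent {
		d.virtuals.Add(delta)
	}
}

func (n *node) hasVirtual() bool { return n.virtuals.Load() != 0 }

func main() {
	root := &node{}
	sub := &node{parent: root}

	sub.addVirtual(1) // e.g. a file created in sub but not yet uploaded
	fmt.Println(root.hasVirtual(), sub.hasVirtual()) // true true

	sub.addVirtual(-1) // the virtual entry is resolved
	fmt.Println(root.hasVirtual(), sub.hasVirtual()) // false false
}
```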
// ForgetAll forgets directory entries for this directory and any children.
//
// It does not invalidate or clear the cache of the parent directory.
//
// It returns true if the directory or any of its children had virtual entries
// so could not be forgotten. Children which didn't have virtual entries and
// children with virtual entries will be forgotten even if true is returned.
// It returns true if the directory or any of its children had virtual
// entries so could not be forgotten. Children which didn't have
// virtual entries will be forgotten even if true is returned.
func (d *Dir) ForgetAll() (hasVirtual bool) {
// We run this part with RLock only to avoid deadlocks in the recursion
d.mu.RLock()
fs.Debugf(d.path, "forgetting directory cache")
for _, node := range d.items {
if dir, ok := node.(*Dir); ok {
if dir.ForgetAll() {
d.setHasVirtual(true)
}
dir.ForgetAll()
}
}
d.mu.RUnlock()
// We run this part with Lock so we can modify the Dir
d.mu.Lock()
defer d.mu.Unlock()
// Purge any unnecessary virtual entries
d._purgeVirtual()
d.read = time.Time{}
// Check if this dir has virtual entries
if len(d.virtual) != 0 {
d.setHasVirtual(true)
}
// Don't clear directory entries if there are virtual entries in this
// directory or any children
if !d.hasVirtual() {
hasVirtual = d.hasVirtual()
if !hasVirtual {
d.read = time.Time{}
d.items = make(map[string]Node)
d.cleanupTimer.Stop()
} else {
d.cleanupTimer.Reset(time.Duration(d.vfs.Opt.DirCacheTime * 2))
}
return d.hasVirtual()
return hasVirtual
}
// forgetDirPath clears the cache for itself and all subdirectories if
@ -448,8 +449,10 @@ func (d *Dir) addObject(node Node) {
if node.IsDir() {
vAdd = vAddDir
}
if _, found := d.virtual[leaf]; !found {
d.addVirtual(1)
}
d.virtual[leaf] = vAdd
d.setHasVirtual(true)
fs.Debugf(d.path, "Added virtual directory entry %v: %q", vAdd, leaf)
d.mu.Unlock()
}
@ -492,8 +495,10 @@ func (d *Dir) delObject(leaf string) {
if d.virtual == nil {
d.virtual = make(map[string]vState)
}
if _, found := d.virtual[leaf]; !found {
d.addVirtual(1)
}
d.virtual[leaf] = vDel
d.setHasVirtual(true)
fs.Debugf(d.path, "Added virtual directory entry %v: %q", vDel, leaf)
d.mu.Unlock()
}
@ -580,9 +585,9 @@ func (d *Dir) _deleteVirtual(name string) {
return
}
delete(d.virtual, name)
d.addVirtual(-1)
if len(d.virtual) == 0 {
d.virtual = nil
d.setHasVirtual(false)
}
fs.Debugf(d.path, "Removed virtual directory entry %v: %q", virtualState, name)
}


@ -607,3 +607,51 @@ func TestDirRename(t *testing.T) {
func TestDirStructSize(t *testing.T) {
t.Logf("Dir struct has size %d bytes", unsafe.Sizeof(Dir{}))
}
// Check that open files appear in the directory listing properly after a forget
func TestDirFileOpen(t *testing.T) {
_, vfs, dir, _ := dirCreate(t)
assert.False(t, dir.hasVirtual())
assert.False(t, dir.parent.hasVirtual())
_, err := dir.Mkdir("sub")
require.NoError(t, err)
assert.True(t, dir.hasVirtual())
assert.True(t, dir.parent.hasVirtual())
fd0, err := vfs.Create("dir/sub/file0")
require.NoError(t, err)
_, err = fd0.Write([]byte("hello"))
require.NoError(t, err)
defer func() {
require.NoError(t, fd0.Close())
}()
fd2, err := vfs.Create("dir/sub/file2")
require.NoError(t, err)
_, err = fd2.Write([]byte("hello world!"))
require.NoError(t, err)
require.NoError(t, fd2.Close())
assert.True(t, dir.hasVirtual())
assert.True(t, dir.hasVirtual())
assert.True(t, dir.parent.hasVirtual())
// Now forget the directory
hasVirtual := dir.parent.ForgetAll()
assert.True(t, hasVirtual)
assert.True(t, dir.hasVirtual())
assert.True(t, dir.parent.hasVirtual())
// Check the files can still be found
fi, err := vfs.Stat("dir/sub/file0")
require.NoError(t, err)
assert.Equal(t, int64(5), fi.Size())
fi, err = vfs.Stat("dir/sub/file2")
require.NoError(t, err)
assert.Equal(t, int64(12), fi.Size())
}