forked from TrueCloudLab/rclone
Compare commits
44 commits
tcl/master ... fix-8106-s
SHA1:
0ccfbe71e4
22e13eea47
de9b593f02
b2b4f8196c
84cebb6872
cb9f4f8461
498d9cfa85
109e4ed0ed
353270263a
f8d782c02d
3dec664a19
a849fd59f0
462a1cf491
0b7b3cacdc
976103d50b
192524c004
28667f58bf
c669f4e218
1a9e6a527d
8c48cadd9c
76e1ba8c46
232e4cd18f
88141928f2
a2a0388036
48543d38e8
eceb390152
f4deffdc96
c172742cef
7daed30754
b1b4c7f27b
ed84553dc1
c94edbb76b
2dcb327bc0
874d66658e
3af757e26d
fef1b61585
3fca7a60a5
6b3f41fa0c
3d0ee47aa2
da70088b11
1bc9b94cf2
15a026d3be
ad122c6f6f
b155231cdd
54 changed files with 1316 additions and 249 deletions
@@ -32,15 +32,27 @@ jobs:
       - name: Get actual major version
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
         with:
-          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+          username: ${{ secrets.DOCKER_HUB_USER }}
+          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
+      - name: Build and publish image
+        uses: docker/build-push-action@v6
+        with:
+          file: Dockerfile
+          context: .
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          push: true
+          tags: |
+            rclone/rclone:latest
+            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
+            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
+            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
 
   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'
@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
   - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
     - make sure this has the `autogenerated options` comments in (see your reference backend docs)
     - update them in your backend with `bin/make_backend_docs.py remote`
-  - `docs/content/overview.md` - overview docs
+  - `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
   - `docs/content/docs.md` - list of remotes in config section
   - `docs/content/_index.md` - front page of rclone.org
   - `docs/layouts/chrome/navbar.html` - add it to the website navigation
Makefile: 6 changes

@@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
+	-@rmdir -p '$$HOME/.config/rclone'
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 
 backenddocs: rclone bin/make_backend_docs.py
+	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 
 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -168,6 +168,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
 
 To make a full build then set the tags correctly and add `--push`
 
+Note that you can't only build one architecture - you need to build them all.
+
 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
VERSION: 2 changes

@@ -1 +1 @@
-v1.68.0
+v1.69.0
@@ -209,6 +209,22 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
 keys instead of setting ` + "`service_principal_file`" + `.
 `,
     Advanced: true,
 }, {
+    Name: "disable_instance_discovery",
+    Help: `Skip requesting Microsoft Entra instance metadata
+
+This should be set true only by applications authenticating in
+disconnected clouds, or private clouds such as Azure Stack.
+
+It determines whether rclone requests Microsoft Entra instance
+metadata from ` + "`https://login.microsoft.com/`" + ` before
+authenticating.
+
+Setting this to true will skip this request, making you responsible
+for ensuring the configured authority is valid and trustworthy.
+`,
+    Default:  false,
+    Advanced: true,
+}, {
     Name: "use_msi",
     Help: `Use a managed service identity to authenticate (only works in Azure).
@@ -243,6 +259,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
     Help:     "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
     Default:  false,
     Advanced: true,
 }, {
+    Name: "use_az",
+    Help: `Use Azure CLI tool az for authentication
+
+Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
+as the sole means of authentication.
+
+Setting this can be useful if you wish to use the az CLI on a host with
+a System Managed Identity that you do not want to use.
+
+Don't set env_auth at the same time.
+`,
+    Default:  false,
+    Advanced: true,
+}, {
     Name: "endpoint",
     Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -438,10 +468,12 @@ type Options struct {
     Username                 string        `config:"username"`
     Password                 string        `config:"password"`
     ServicePrincipalFile     string        `config:"service_principal_file"`
+    DisableInstanceDiscovery bool          `config:"disable_instance_discovery"`
     UseMSI                   bool          `config:"use_msi"`
     MSIObjectID              string        `config:"msi_object_id"`
     MSIClientID              string        `config:"msi_client_id"`
     MSIResourceID            string        `config:"msi_mi_res_id"`
+    UseAZ                    bool          `config:"use_az"`
     Endpoint                 string        `config:"endpoint"`
     ChunkSize                fs.SizeSuffix `config:"chunk_size"`
     UploadConcurrency        int           `config:"upload_concurrency"`
@@ -725,7 +757,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }
     // Read credentials from the environment
     options := azidentity.DefaultAzureCredentialOptions{
-        ClientOptions: policyClientOptions,
+        ClientOptions:            policyClientOptions,
+        DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
     }
     cred, err = azidentity.NewDefaultAzureCredential(&options)
     if err != nil {
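The hunk above threads the new option into `azidentity.DefaultAzureCredentialOptions`. A minimal standalone sketch of the same SDK call outside rclone; the storage scope and logging are illustrative assumptions, not from the diff:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Mirrors disable_instance_discovery: skip fetching Entra instance
	// metadata before authenticating (disconnected/private clouds).
	options := azidentity.DefaultAzureCredentialOptions{
		DisableInstanceDiscovery: true,
	}
	cred, err := azidentity.NewDefaultAzureCredential(&options)
	if err != nil {
		log.Fatalf("failed to create credential: %v", err)
	}
	// Request a token for Azure Storage (scope chosen for illustration).
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatalf("failed to get token: %v", err)
	}
	log.Printf("token expires at %v", tok.ExpiresOn)
}
```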
@@ -875,6 +908,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         if err != nil {
             return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
         }
+    case opt.UseAZ:
+        var options = azidentity.AzureCLICredentialOptions{}
+        cred, err = azidentity.NewAzureCLICredential(&options)
+        if err != nil {
+            return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
+        }
     case opt.Account != "":
         // Anonymous access
         anonymous = true
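The `use_az` case above maps to `azidentity.NewAzureCLICredential`. A minimal sketch, assuming `az login` has already been run on the host; the scope is again an illustrative assumption:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// The az CLI as the sole credential, as use_az = true selects above.
	cred, err := azidentity.NewAzureCLICredential(&azidentity.AzureCLICredentialOptions{})
	if err != nil {
		log.Fatalf("failed to create Azure CLI credentials: %v", err)
	}
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatalf("failed to get token: %v", err)
	}
	log.Printf("got token, expires %v", tok.ExpiresOn)
}
```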
@@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
     Default: "",
     Help: `Socks 5 proxy host.
 
-        Supports the format user:pass@host:port, user@host:port, host:port.
+Supports the format user:pass@host:port, user@host:port, host:port.
 
-        Example:
+Example:
 
-            myUser:myPass@localhost:9005
-        `,
+    myUser:myPass@localhost:9005
+`,
     Advanced: true,
+}, {
+    Name:    "no_check_upload",
+    Default: false,
+    Help: `Don't check the upload is OK
+
+Normally rclone will try to check the upload exists after it has
+uploaded a file to make sure the size and modification time are as
+expected.
+
+This flag stops rclone doing these checks. This enables uploading to
+folders which are write only.
+
+You will likely need to use the --inplace flag also if uploading to
+a write only folder.
+`,
+    Advanced: true,
 }, {
     Name: config.ConfigEncoding,
@@ -232,6 +248,7 @@ type Options struct {
     AskPassword   bool                 `config:"ask_password"`
     Enc           encoder.MultiEncoder `config:"encoding"`
     SocksProxy    string               `config:"socks_proxy"`
+    NoCheckUpload bool                 `config:"no_check_upload"`
 }
 
 // Fs represents a remote FTP server
|
|||
return fmt.Errorf("update stor: %w", err)
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if o.fs.opt.NoCheckUpload {
|
||||
o.info = &FileInfo{
|
||||
Name: o.remote,
|
||||
Size: uint64(src.Size()),
|
||||
ModTime: src.ModTime(ctx),
|
||||
precise: true,
|
||||
IsDir: false,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
|
|
|
@@ -28,7 +28,6 @@ import (
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/fshttp"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/lib/batcher"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/oauthutil"
@@ -160,6 +159,34 @@ listings and transferred.
 Without this flag, archived media will not be visible in directory
 listings and won't be transferred.`,
     Advanced: true,
+}, {
+    Name:    "proxy",
+    Default: "",
+    Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
+
+The Google API will deliver images and video which aren't full
+resolution, and/or have EXIF data missing.
+
+However if you use the gphotosdl proxy then you can download original,
+unchanged images.
+
+This runs a headless browser in the background.
+
+Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
+
+First run with
+
+    gphotosdl -login
+
+Then once you have logged into google photos close the browser window
+and run
+
+    gphotosdl
+
+Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
+rclone use the proxy.
+`, "|", "`"),
+    Advanced: true,
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,
@@ -181,6 +208,7 @@ type Options struct {
     BatchMode    string      `config:"batch_mode"`
     BatchSize    int         `config:"batch_size"`
     BatchTimeout fs.Duration `config:"batch_timeout"`
+    Proxy        string      `config:"proxy"`
 }
 
 // Fs represents a remote storage server
@@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-    defer log.Trace(f, "remote=%q", remote)("")
+    // defer log.Trace(f, "remote=%q", remote)("")
     return f.newObjectWithInfo(ctx, remote, nil)
 }
 
@@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+    // defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
     match, prefix, pattern := patterns.match(f.root, dir, false)
     if pattern == nil || pattern.isFile {
         return nil, fs.ErrorDirNotFound
@@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    defer log.Trace(f, "src=%+v", src)("")
+    // defer log.Trace(f, "src=%+v", src)("")
     // Temporary Object under construction
     o := &Object{
         fs: f,
@@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
 
 // Mkdir creates the album if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-    defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+    // defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
     match, prefix, pattern := patterns.match(f.root, dir, false)
     if pattern == nil {
         return fs.ErrorDirNotFound
@@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-    defer log.Trace(f, "dir=%q")("err=%v", &err)
+    // defer log.Trace(f, "dir=%q")("err=%v", &err)
     match, _, pattern := patterns.match(f.root, dir, false)
     if pattern == nil {
         return fs.ErrorDirNotFound
@@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-    defer log.Trace(o, "")("")
+    // defer log.Trace(o, "")("")
     if !o.fs.opt.ReadSize || o.bytes >= 0 {
         return o.bytes
     }
@@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-    defer log.Trace(o, "")("")
+    // defer log.Trace(o, "")("")
     err := o.readMetaData(ctx)
     if err != nil {
         fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -965,16 +993,20 @@ func (o *Object) downloadURL() string {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-    defer log.Trace(o, "")("")
+    // defer log.Trace(o, "")("")
     err = o.readMetaData(ctx)
     if err != nil {
         fs.Debugf(o, "Open: Failed to read metadata: %v", err)
         return nil, err
     }
+    url := o.downloadURL()
+    if o.fs.opt.Proxy != "" {
+        url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
+    }
     var resp *http.Response
     opts := rest.Opts{
         Method:  "GET",
-        RootURL: o.downloadURL(),
+        RootURL: url,
         Options: options,
     }
     err = o.fs.pacer.Call(func() (bool, error) {
@@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-    defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+    // defer log.Trace(o, "src=%+v", src)("err=%v", &err)
     match, _, pattern := patterns.match(o.fs.root, o.remote, true)
     if pattern == nil || !pattern.isFile || !pattern.canUpload {
         return errCantUpload
@@ -6,6 +6,7 @@ package local
 import (
     "context"
     "fmt"
+    "path/filepath"
     "runtime"
 
     "github.com/go-darwin/apfs"
@@ -22,7 +23,7 @@ import (
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-    if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
+    if runtime.GOOS != "darwin" || f.opt.NoClone {
         return nil, fs.ErrorCantCopy
     }
     srcObj, ok := src.(*Object)
@@ -30,6 +31,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         fs.Debugf(src, "Can't clone - not same remote type")
         return nil, fs.ErrorCantCopy
     }
+    if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
+        return nil, fs.ErrorCantCopy
+    }
 
     // Fetch metadata if --metadata is in use
     meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
@@ -44,11 +48,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         return nil, err
     }
 
-    err = Clone(srcObj.path, f.localPath(remote))
+    srcPath := srcObj.path
+    if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
+        srcPath, err = filepath.EvalSymlinks(srcPath)
+        if err != nil {
+            return nil, err
+        }
+    }
+    err = Clone(srcPath, f.localPath(remote))
     if err != nil {
         return nil, err
     }
     fs.Debugf(remote, "server-side cloned!")
 
     // Set metadata if --metadata is in use
     if meta != nil {
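The clone path above resolves symlinks with `filepath.EvalSymlinks` before calling `Clone`. A self-contained sketch of that resolution step, using hypothetical temp-file paths (assumes a platform where symlink creation is permitted):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "clone-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// A real file and a symlink pointing at it.
	target := filepath.Join(dir, "file.txt")
	if err := os.WriteFile(target, []byte("hello"), 0o644); err != nil {
		panic(err)
	}
	link := filepath.Join(dir, "link.txt")
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}

	// Follow the symlink to the file it points at, as Copy does before Clone.
	srcPath, err := filepath.EvalSymlinks(link)
	if err != nil {
		panic(err)
	}
	data, err := os.ReadFile(srcPath)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s resolves to %s, contents: %s\n", link, srcPath, data)
}
```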
@@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
     r.WriteFile(filePath, "content updated", time.Now())
     _, err = in.Read(buf)
     require.NoError(t, err)
-
 }
 
 // Test corrupted on transfer
@@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
     assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
 
     // Reupload it with different contents but same size and timestamp
-    var b = bytes.NewBufferString("CONTENT")
+    b := bytes.NewBufferString("CONTENT")
     src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
     err = o.Update(ctx, b, src)
     require.NoError(t, err)
@@ -395,7 +394,6 @@ func TestMetadata(t *testing.T) {
             assert.Equal(t, "wedges", m["potato"])
         }
     })
-
 }
 
 func TestFilter(t *testing.T) {
@@ -572,4 +570,35 @@ func TestCopySymlink(t *testing.T) {
     linkContents, err := os.Readlink(dstPath)
     require.NoError(t, err)
     assert.Equal(t, "file.txt", linkContents)
+
+    // Set fs into "-L/--copy-links" mode
+    f.opt.FollowSymlinks = true
+    f.opt.TranslateSymlinks = false
+    f.lstat = os.Stat
+
+    // Create dst
+    require.NoError(t, f.Mkdir(ctx, "dst2"))
+
+    // Do copy from src into dst
+    src, err = f.NewObject(ctx, "src/link.txt")
+    require.NoError(t, err)
+    require.NotNil(t, src)
+    dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
+    require.NoError(t, err)
+    require.NotNil(t, dst)
+
+    // Test that we made a NON-symlink and it has the right contents
+    dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
+    fi, err := os.Lstat(dstPath)
+    require.NoError(t, err)
+    assert.True(t, fi.Mode()&os.ModeSymlink == 0)
+    want := fstest.NewItem("dst2/link.txt", "hello world", when)
+    fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
+
+    // Test that copying a normal file also works
+    dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
+    require.NoError(t, err)
+    require.NotNil(t, dst)
+    want = fstest.NewItem("dst2/file.txt", "hello world", when)
+    fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
 }
@@ -6,6 +6,7 @@ import (
     "errors"
     "fmt"
     "net/http"
+    "slices"
     "strings"
     "time"
 
@@ -14,7 +15,6 @@ import (
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/lib/dircache"
     "github.com/rclone/rclone/lib/errcount"
-    "golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
 )
 
 const (
@@ -942,7 +942,8 @@ func errorHandler(resp *http.Response) error {
     // Decode error response
     errResponse := new(api.Error)
     err := rest.DecodeJSON(resp, &errResponse)
-    if err != nil {
+    // Redirects have no body so don't report an error
+    if err != nil && resp.Header.Get("Location") == "" {
         fs.Debugf(nil, "Couldn't decode error response: %v", err)
     }
     if errResponse.ErrorInfo.Code == "" {
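The fix above suppresses the decode warning for redirect responses, which carry a `Location` header but no JSON body. A small sketch of that check in isolation; the response and error here are fabricated stand-ins:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// A 302 response typically has no JSON body, just a Location header.
	resp := &http.Response{
		StatusCode: 302,
		Header:     http.Header{"Location": []string{"https://example.com/next"}},
	}
	decodeErr := fmt.Errorf("unexpected end of JSON input") // stand-in decode failure

	// Only report the decode failure when the response is not a redirect.
	if decodeErr != nil && resp.Header.Get("Location") == "" {
		fmt.Println("couldn't decode error response:", decodeErr)
	} else {
		fmt.Println("redirect with empty body - nothing worth logging")
	}
}
```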
@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "slices"
     "testing"
     "time"
 
@@ -16,7 +17,6 @@ import (
     "github.com/rclone/rclone/lib/random"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
 )
 
 // go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
@@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     return dstObj, nil
 }
 
+// About gets quota information
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+    var uInfo usersInfoResponse
+    var resp *http.Response
+
+    err = f.pacer.Call(func() (bool, error) {
+        opts := rest.Opts{
+            Method: "GET",
+            Path:   "/users/info.json/" + f.session.SessionID,
+        }
+        resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
+        return f.shouldRetry(ctx, resp, err)
+    })
+
+    if err != nil {
+        return nil, err
+    }
+
+    usage = &fs.Usage{
+        Used:  fs.NewUsageValue(uInfo.StorageUsed),
+        Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
+        Free:  fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
+    }
+    return usage, nil
+}
+
 // Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given.
@@ -1147,6 +1173,7 @@ var (
     _ fs.Mover           = (*Fs)(nil)
     _ fs.DirMover        = (*Fs)(nil)
     _ fs.DirCacheFlusher = (*Fs)(nil)
+    _ fs.Abouter         = (*Fs)(nil)
     _ fs.Object          = (*Object)(nil)
     _ fs.IDer            = (*Object)(nil)
     _ fs.ParentIDer      = (*Object)(nil)
@@ -231,3 +231,10 @@ type permissions struct {
 type uploadFileChunkReply struct {
     TotalWritten int64 `json:"TotalWritten"`
 }
+
+// usersInfoResponse describes OpenDrive users/info.json response
+type usersInfoResponse struct {
+    // This response contains many other values but these are the only ones currently in use
+    StorageUsed int64 `json:"StorageUsed,string"`
+    MaxStorage  int64 `json:"MaxStorage,string"`
+}
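The `,string` tags above make `encoding/json` unquote numeric strings into `int64`, and `About` then scales `MaxStorage` from MB to bytes. A runnable sketch; the payload values are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type usersInfoResponse struct {
	// OpenDrive returns these numbers as JSON strings; ",string" unquotes them.
	StorageUsed int64 `json:"StorageUsed,string"`
	MaxStorage  int64 `json:"MaxStorage,string"`
}

func main() {
	data := []byte(`{"StorageUsed":"1048576","MaxStorage":"5120"}`) // hypothetical response
	var u usersInfoResponse
	if err := json.Unmarshal(data, &u); err != nil {
		panic(err)
	}
	// MaxStorage appears to be in MB, so total bytes = MaxStorage * 1024 * 1024.
	fmt.Println("used bytes:", u.StorageUsed)          // 1048576
	fmt.Println("total bytes:", u.MaxStorage*1024*1024) // 5368709120
}
```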
@@ -513,6 +513,72 @@ type RequestDecompress struct {
     DefaultParent bool `json:"default_parent,omitempty"`
 }
 
+// ------------------------------------------------------------ authorization
+
+// CaptchaToken is a response to requestCaptchaToken api call
+type CaptchaToken struct {
+    CaptchaToken string `json:"captcha_token"`
+    ExpiresIn    int64  `json:"expires_in"` // currently 300s
+    // API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
+    Expiry time.Time `json:"expiry,omitempty"`
+    URL    string    `json:"url,omitempty"` // a link for users to solve captcha
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *CaptchaToken) expired() bool {
+    if t.Expiry.IsZero() {
+        return false
+    }
+
+    expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
+    return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has a CaptchaToken, and is not expired.
+func (t *CaptchaToken) Valid() bool {
+    return t != nil && t.CaptchaToken != "" && !t.expired()
+}
+
+// CaptchaTokenRequest is to request for captcha token
+type CaptchaTokenRequest struct {
+    Action       string            `json:"action,omitempty"`
+    CaptchaToken string            `json:"captcha_token,omitempty"`
+    ClientID     string            `json:"client_id,omitempty"`
+    DeviceID     string            `json:"device_id,omitempty"`
+    Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
+}
+
+// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
+type CaptchaTokenMeta struct {
+    CaptchaSign   string `json:"captcha_sign,omitempty"`
+    ClientVersion string `json:"client_version,omitempty"`
+    PackageName   string `json:"package_name,omitempty"`
+    Timestamp     string `json:"timestamp,omitempty"`
+    UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
+    UserName      string `json:"username,omitempty"`
+    Email         string `json:"email,omitempty"`
+    PhoneNumber   string `json:"phone_number,omitempty"`
+}
+
+// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
+type Token struct {
+    TokenType    string `json:"token_type"`
+    AccessToken  string `json:"access_token"`
+    RefreshToken string `json:"refresh_token"`
+    ExpiresIn    int    `json:"expires_in"`
+    Sub          string `json:"sub"`
+}
+
+// Expiry returns expiry from expires in, so it should be called on retrieval
+// e must be non-nil.
+func (e *Token) Expiry() (t time.Time) {
+    if v := e.ExpiresIn; v != 0 {
+        return time.Now().Add(time.Duration(v) * time.Second)
+    }
+    return
+}
+
 // ------------------------------------------------------------
 
 // NOT implemented YET
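The expiry handling above populates `Expiry` from `ExpiresIn` on retrieval and applies a 10-second safety delta when checking validity. A trimmed, standalone copy of that logic with hypothetical token values:

```go
package main

import (
	"fmt"
	"time"
)

type CaptchaToken struct {
	CaptchaToken string
	ExpiresIn    int64
	Expiry       time.Time
}

func (t *CaptchaToken) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}
	expiryDelta := 10 * time.Second // same safety margin as above
	return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}

func (t *CaptchaToken) Valid() bool {
	return t != nil && t.CaptchaToken != "" && !t.expired()
}

func main() {
	tok := &CaptchaToken{CaptchaToken: "abc", ExpiresIn: 300} // hypothetical token
	// Populate Expiry from ExpiresIn, as requestToken does on retrieval.
	tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	fmt.Println(tok.Valid()) // true: 300s left is well beyond the 10s delta

	tok.Expiry = time.Now().Add(5 * time.Second)
	fmt.Println(tok.Valid()) // false: inside the 10s delta, treated as expired
}
```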
@@ -3,8 +3,10 @@ package pikpak
 import (
     "bytes"
     "context"
+    "crypto/md5"
     "crypto/sha1"
+    "encoding/hex"
     "encoding/json"
     "errors"
     "fmt"
     "io"
 
@@ -14,10 +16,13 @@ import (
     "os"
     "strconv"
     "strings"
+    "sync"
     "time"
 
     "github.com/rclone/rclone/backend/pikpak/api"
     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/lib/rest"
 )
@@ -262,15 +267,20 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
     if err != nil {
         return
     }
+    if src.Size() == 0 {
+        // If src is zero-length, the API will return
+        // Error "cid and file_size is required" (400)
+        // In this case, we can simply return cid == gcid
+        return cid, nil
+    }
 
     params := url.Values{}
     params.Set("cid", cid)
     params.Set("file_size", strconv.FormatInt(src.Size(), 10))
     opts := rest.Opts{
-        Method:       "GET",
-        Path:         "/drive/v1/resource/cid",
-        Parameters:   params,
-        ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
+        Method:     "GET",
+        Path:       "/drive/v1/resource/cid",
+        Parameters: params,
     }
 
     info := struct {
@@ -368,11 +378,23 @@ func calcGcid(r io.Reader, size int64) (string, error) {
     return hex.EncodeToString(totalHash.Sum(nil)), nil
 }
 
+// unWrapObjectInfo returns the underlying Object unwrapped as much as
+// possible or nil even if it is an OverrideRemote
+func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
+    if o, ok := oi.(fs.Object); ok {
+        return fs.UnWrapObject(o)
+    } else if do, ok := oi.(*fs.OverrideRemote); ok {
+        // Unwrap if it is an operations.OverrideRemote
+        return do.UnWrap()
+    }
+    return nil
+}
+
 // calcCid calculates Cid from source
 //
 // Cid is a simplified version of Gcid
 func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
-    srcObj := fs.UnWrapObjectInfo(src)
+    srcObj := unWrapObjectInfo(src)
     if srcObj == nil {
         return "", fmt.Errorf("failed to unwrap object from src: %s", src)
     }
@@ -408,6 +430,8 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
     return
 }
 
+// ------------------------------------------------------------ authorization
+
 // randomly generates device id used for request header 'x-device-id'
 //
 // original javascript implementation
@@ -428,3 +452,206 @@ func genDeviceID() string {
     }
     return string(base)
 }
+
+var md5Salt = []string{
+    "C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
+    "+r6CQVxjzJV6LCV",
+    "F",
+    "pFJRC",
+    "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
+    "/750aCr4lm/Sly/c",
+    "RB+DT/gZCrbV",
+    "",
+    "CyLsf7hdkIRxRm215hl",
+    "7xHvLi2tOYP0Y92b",
+    "ZGTXXxu8E/MIWaEDB+Sm/",
+    "1UI3",
+    "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
+    "ihtqpG6FMt65+Xk+tWUH2",
+    "NhXXU9rg4XXdzo7u5o",
+}
+
+func md5Sum(text string) string {
+    hash := md5.Sum([]byte(text))
+    return hex.EncodeToString(hash[:])
+}
+
+func calcCaptchaSign(deviceID string) (timestamp, sign string) {
+    timestamp = fmt.Sprint(time.Now().UnixMilli())
+    str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
+    for _, salt := range md5Salt {
+        str = md5Sum(str + salt)
+    }
+    sign = "1." + str
+    return
+}
+
+func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
+    req = &api.CaptchaTokenRequest{
+        Action:       action,
+        CaptchaToken: oldToken, // can be empty initially
+        ClientID:     clientID,
+        DeviceID:     opt.DeviceID,
+        Meta:         new(api.CaptchaTokenMeta),
+    }
+    switch action {
+    case "POST:/v1/auth/signin":
+        req.Meta.UserName = opt.Username
+    default:
+        timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
+        req.Meta.CaptchaSign = captchaSign
+        req.Meta.Timestamp = timestamp
+        req.Meta.ClientVersion = clientVersion
+        req.Meta.PackageName = packageName
+        req.Meta.UserID = opt.UserID
+    }
+    return
+}
+
+// CaptchaTokenSource stores updated captcha tokens in the config file
+type CaptchaTokenSource struct {
+    mu    sync.Mutex
+    m     configmap.Mapper
+    opt   *Options
+    token *api.CaptchaToken
+    ctx   context.Context
+    rst   *pikpakClient
+}
+
+// initialize CaptchaTokenSource from rclone.conf if possible
+func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
+    token := new(api.CaptchaToken)
+    tokenString, ok := m.Get("captcha_token")
+    if !ok || tokenString == "" {
+        fs.Debugf(nil, "failed to read captcha token out of config file")
+    } else {
+        if err := json.Unmarshal([]byte(tokenString), token); err != nil {
+            fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
+        }
+    }
+    return &CaptchaTokenSource{
+        m:     m,
+        opt:   opt,
+        token: token,
+        ctx:   ctx,
+        rst:   newPikpakClient(getClient(ctx, opt), opt),
+    }
+}
+
+// requestToken retrieves captcha token from API
+func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
+    opts := rest.Opts{
+        Method:  "POST",
+        RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
+    }
+    var info *api.CaptchaToken
+    _, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
+    if err == nil && info.ExpiresIn != 0 {
+        // populate to Expiry
+        info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
+        cts.token = info // update with a new one
+    }
+    return
+}
+
+func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
+    oldToken := ""
+    if cts.token != nil {
+        oldToken = cts.token.CaptchaToken
+    }
+    action := "GET:/drive/v1/about"
+    if opts.RootURL == "" && opts.Path != "" {
+        action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
+    } else if u, err := url.Parse(opts.RootURL); err == nil {
+        action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
+    }
+    req := newCaptchaTokenRequest(action, oldToken, cts.opt)
+    if err := cts.requestToken(cts.ctx, req); err != nil {
+        return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
+    }
+
+    // put it into rclone.conf
+    tokenBytes, err := json.Marshal(cts.token)
+    if err != nil {
+        return "", fmt.Errorf("failed to marshal captcha token: %w", err)
+    }
+    cts.m.Set("captcha_token", string(tokenBytes))
+    return cts.token.CaptchaToken, nil
+}
+
+// Invalidate resets existing captcha token for a forced refresh
+func (cts *CaptchaTokenSource) Invalidate() {
+    cts.mu.Lock()
+    cts.token.CaptchaToken = ""
+    cts.mu.Unlock()
+}
+
+// Token returns a valid captcha token
+func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
+    cts.mu.Lock()
+    defer cts.mu.Unlock()
+    if cts.token.Valid() {
+        return cts.token.CaptchaToken, nil
+    }
+    return cts.refreshToken(opts)
+}
+
+// pikpakClient wraps rest.Client with a handle of captcha token
+type pikpakClient struct {
+    opt     *Options
+    client  *rest.Client
+    captcha *CaptchaTokenSource
+}
+
+// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
+// * error handler
+// * root url
+// * default headers
+func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
+    client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
+    for key, val := range map[string]string{
+        "Referer":          "https://mypikpak.com/",
+        "x-client-id":      clientID,
+        "x-client-version": clientVersion,
+        "x-device-id":      opt.DeviceID,
+        // "x-device-model": "firefox%2F129.0",
+        // "x-device-name": "PC-Firefox",
+        // "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
+        // "x-net-work-type": "NONE",
+        // "x-os-version": "Win32",
+        // "x-platform-version": "1",
+        // "x-protocol-version": "301",
+        // "x-provider-name": "NONE",
+        // "x-sdk-version": "8.0.3",
+    } {
+        client.SetHeader(key, val)
+    }
+    return &pikpakClient{
+        client: client,
+        opt:    opt,
+    }
+}
+
+// This should be called right after pikpakClient initialized
+func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
+    c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
+    return c
+}
+
+func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+    if c.captcha != nil {
+        token, err := c.captcha.Token(opts)
+        if err != nil || token == "" {
+            return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
+        }
+        if opts.ExtraHeaders == nil {
+            opts.ExtraHeaders = make(map[string]string)
+        }
+        opts.ExtraHeaders["x-captcha-token"] = token
+    }
+    return c.client.CallJSON(ctx, opts, request, response)
+}
+
+func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
+    return c.client.Call(ctx, opts)
+}
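`calcCaptchaSign` above derives the sign by chaining MD5 over the identity string once per salt. A standalone sketch of just that derivation, reusing the constants and salt table from the diff; the device id is a hypothetical placeholder:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"time"
)

// Constants as in the diff above.
const (
	clientID      = "YUMx5nI8ZU8Ap8pm"
	clientVersion = "2.0.0"
	packageName   = "mypikpak.com"
)

// Salt table as in the diff above (abbreviated comment; values verbatim).
var md5Salt = []string{
	"C9qPpZLN8ucRTaTiUMWYS9cQvWOE", "+r6CQVxjzJV6LCV", "F", "pFJRC",
	"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", "/750aCr4lm/Sly/c", "RB+DT/gZCrbV",
	"", "CyLsf7hdkIRxRm215hl", "7xHvLi2tOYP0Y92b", "ZGTXXxu8E/MIWaEDB+Sm/",
	"1UI3", "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
	"ihtqpG6FMt65+Xk+tWUH2", "NhXXU9rg4XXdzo7u5o",
}

func md5Sum(text string) string {
	hash := md5.Sum([]byte(text))
	return hex.EncodeToString(hash[:])
}

func main() {
	deviceID := "0123456789abcdef0123456789abcdef" // hypothetical 32-char device id
	timestamp := fmt.Sprint(time.Now().UnixMilli())
	// Chain MD5 over the concatenated identity string, once per salt.
	str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
	for _, salt := range md5Salt {
		str = md5Sum(str + salt)
	}
	fmt.Println("captcha_sign:", "1."+str)
}
```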
@@ -23,6 +23,7 @@ package pikpak
 import (
     "bytes"
     "context"
+    "encoding/base64"
    "encoding/json"
     "errors"
     "fmt"
 
@@ -51,6 +52,7 @@ import (
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/fserrors"
+    "github.com/rclone/rclone/fs/fshttp"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/lib/atexit"
     "github.com/rclone/rclone/lib/dircache"
@@ -64,15 +66,17 @@ import (
 
 // Constants
 const (
-    rcloneClientID              = "YNxT9w7GMdWvEOKa"
-    rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
-    minSleep                    = 100 * time.Millisecond
-    maxSleep                    = 2 * time.Second
-    taskWaitTime                = 500 * time.Millisecond
-    decayConstant               = 2 // bigger for slower decay, exponential
-    rootURL                     = "https://api-drive.mypikpak.com"
-    minChunkSize                = fs.SizeSuffix(manager.MinUploadPartSize)
-    defaultUploadConcurrency    = manager.DefaultUploadConcurrency
+    clientID                 = "YUMx5nI8ZU8Ap8pm"
+    clientVersion            = "2.0.0"
+    packageName              = "mypikpak.com"
+    defaultUserAgent         = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
+    minSleep                 = 100 * time.Millisecond
+    maxSleep                 = 2 * time.Second
+    taskWaitTime             = 500 * time.Millisecond
+    decayConstant            = 2 // bigger for slower decay, exponential
+    rootURL                  = "https://api-drive.mypikpak.com"
+    minChunkSize             = fs.SizeSuffix(manager.MinUploadPartSize)
+    defaultUploadConcurrency = manager.DefaultUploadConcurrency
 )
 
 // Globals
@@ -85,43 +89,53 @@ var (
             TokenURL:  "https://user.mypikpak.com/v1/auth/token",
             AuthStyle: oauth2.AuthStyleInParams,
         },
-        ClientID:     rcloneClientID,
-        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-        RedirectURL:  oauthutil.RedirectURL,
+        ClientID:    clientID,
+        RedirectURL: oauthutil.RedirectURL,
     }
 )
 
-// Returns OAuthOptions modified for pikpak
-func pikpakOAuthOptions() []fs.Option {
-    opts := []fs.Option{}
-    for _, opt := range oauthutil.SharedOptions {
-        if opt.Name == config.ConfigClientID {
-            opt.Advanced = true
-        } else if opt.Name == config.ConfigClientSecret {
-            opt.Advanced = true
-        }
-        opts = append(opts, opt)
-    }
-    return opts
-}
-
 // pikpakAuthorize retrieves OAuth token using user/pass and save it to rclone.conf
 func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
-    // override default client id/secret
-    if id, ok := m.Get("client_id"); ok && id != "" {
-        oauthConfig.ClientID = id
-    }
-    if secret, ok := m.Get("client_secret"); ok && secret != "" {
-        oauthConfig.ClientSecret = secret
-    }
     if opt.Username == "" {
         return errors.New("no username")
     }
     pass, err := obscure.Reveal(opt.Password)
     if err != nil {
         return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
     }
-    t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
+    // new device id if necessary
+    if len(opt.DeviceID) != 32 {
+        opt.DeviceID = genDeviceID()
+        m.Set("device_id", opt.DeviceID)
+        fs.Infof(nil, "Using new device id %q", opt.DeviceID)
+    }
+    opts := rest.Opts{
+        Method:  "POST",
+        RootURL: "https://user.mypikpak.com/v1/auth/signin",
+    }
+    req := map[string]string{
+        "username":  opt.Username,
+        "password":  pass,
+        "client_id": clientID,
+    }
+    var token api.Token
+    rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
+    _, err = rst.CallJSON(ctx, &opts, req, &token)
+    if apiErr, ok := err.(*api.Error); ok {
+        if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
+            rst.captcha.Invalidate()
+            _, err = rst.CallJSON(ctx, &opts, req, &token)
+        }
+    }
     if err != nil {
         return fmt.Errorf("failed to retrieve token using username/password: %w", err)
     }
+    t := &oauth2.Token{
+        AccessToken:  token.AccessToken,
+        TokenType:    token.TokenType,
+        RefreshToken: token.RefreshToken,
+        Expiry:       token.Expiry(),
+    }
     return oauthutil.PutToken(name, m, t, false)
 }
@@ -160,7 +174,7 @@ func init() {
             }
             return nil, fmt.Errorf("unknown state %q", config.State)
         },
-        Options: append(pikpakOAuthOptions(), []fs.Option{{
+        Options: []fs.Option{{
             Name:     "user",
             Help:     "Pikpak username.",
             Required: true,
@@ -170,6 +184,18 @@ func init() {
             Help:       "Pikpak password.",
             Required:   true,
             IsPassword: true,
+        }, {
+            Name:      "device_id",
+            Help:      "Device ID used for authorization.",
+            Advanced:  true,
+            Sensitive: true,
+        }, {
+            Name:     "user_agent",
+            Default:  defaultUserAgent,
+            Advanced: true,
+            Help: fmt.Sprintf(`HTTP user agent for pikpak.
+
+Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
         }, {
             Name: "root_folder_id",
             Help: `ID of the root folder.
@@ -248,7 +274,7 @@ this may help to speed up the transfers.`,
                 encoder.EncodeRightSpace |
                 encoder.EncodeRightPeriod |
                 encoder.EncodeInvalidUtf8),
-        }}...),
+        }},
     })
 }
 
@@ -256,6 +282,9 @@ this may help to speed up the transfers.`,
 type Options struct {
     Username     string `config:"user"`
     Password     string `config:"pass"`
+    UserID       string `config:"user_id"` // only available during runtime
+    DeviceID     string `config:"device_id"`
+    UserAgent    string `config:"user_agent"`
     RootFolderID string `config:"root_folder_id"`
     UseTrash     bool   `config:"use_trash"`
     TrashedOnly  bool   `config:"trashed_only"`
@@ -271,11 +300,10 @@ type Fs struct {
     root         string             // the path we are working on
     opt          Options            // parsed options
     features     *fs.Features       // optional features
-    rst          *rest.Client       // the connection to the server
+    rst          *pikpakClient      // the connection to the server
     dirCache     *dircache.DirCache // Map of directory path to directory id
     pacer        *fs.Pacer          // pacer for API calls
     rootFolderID string             // the id of the root folder
-    deviceID     string             // device id used for api requests
     client       *http.Client       // authorized client
     m            configmap.Mapper
     tokenMu      *sync.Mutex // when renewing tokens
@@ -429,6 +457,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
         } else if apiErr.Reason == "file_space_not_enough" {
             // "file_space_not_enough" (8): Storage space is not enough
             return false, fserrors.FatalError(err)
+        } else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
+            // "captcha_invalid" (9): Verification code is invalid
+            // This error occurred on the POST:/drive/v1/files endpoint
+            // when a zero-byte file was uploaded with an invalid captcha token
+            f.rst.captcha.Invalidate()
+            return true, err
         }
     }
 
@@ -452,13 +486,36 @@ func errorHandler(resp *http.Response) error {
     return errResponse
 }
 
+// getClient makes an http client according to the options
+func getClient(ctx context.Context, opt *Options) *http.Client {
+    // Override few config settings and create a client
+    newCtx, ci := fs.AddConfig(ctx)
+    ci.UserAgent = opt.UserAgent
+    return fshttp.NewClient(newCtx)
+}
+
 // newClientWithPacer sets a new http/rest client with a pacer to Fs
 func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
-    f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
+    var ts *oauthutil.TokenSource
+    f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
     if err != nil {
         return fmt.Errorf("failed to create oauth client: %w", err)
     }
-    f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
+    token, err := ts.Token()
+    if err != nil {
+        return err
+    }
+    // parse user_id from oauth access token for later use
+    if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
+        jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
+        info := struct {
+            UserID string `json:"sub,omitempty"`
+        }{}
+        if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
+            f.opt.UserID = info.UserID
+        }
+    }
+    f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
     f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
     return nil
 }
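The token parsing above reads the `sub` claim out of the JWT's payload segment. A sketch of the same idea with a fabricated, unsigned token; it uses `RawURLEncoding`, which accepts unpadded segments, where the diff instead pads with `"==="` and ignores decode errors:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Fabricated JWT-like token: header.payload.signature, payload is {"sub":...}.
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"user-12345"}`))
	accessToken := "header." + payload + ".signature"

	parts := strings.Split(accessToken, ".")
	if len(parts) > 1 {
		jsonStr, err := base64.RawURLEncoding.DecodeString(parts[1])
		if err != nil {
			panic(err)
		}
		info := struct {
			UserID string `json:"sub,omitempty"`
		}{}
		if err := json.Unmarshal(jsonStr, &info); err == nil {
			fmt.Println("user_id:", info.UserID) // user-12345
		}
	}
}
```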
@@ -491,10 +548,19 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
         CanHaveEmptyDirectories: true, // can have empty directories
         NoMultiThreading:        true, // can't have multiple threads downloading
     }).Fill(ctx, f)
-    f.deviceID = genDeviceID()
+    // new device id if necessary
+    if len(f.opt.DeviceID) != 32 {
+        f.opt.DeviceID = genDeviceID()
+        m.Set("device_id", f.opt.DeviceID)
+        fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
+    }
 
     if err := f.newClientWithPacer(ctx); err != nil {
+        // re-authorize if necessary
+        if strings.Contains(err.Error(), "invalid_grant") {
+            return f, f.reAuthorize(ctx)
+        }
         return nil, err
     }
 
     return f, nil
@@ -1707,7 +1773,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
     gcid, err := o.fs.getGcid(ctx, src)
     if err != nil || gcid == "" {
         fs.Debugf(o, "calculating gcid: %v", err)
-        if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
+        if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
             // No buffering; directly calculate gcid from source
             rc, err := srcObj.Open(ctx)
             if err != nil {
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         // No root so return old f
         return f, nil
     }
-    _, err := tempF.newObjectWithLink(ctx, remote, nil)
+    _, err := tempF.newObject(ctx, remote)
     if err != nil {
         if err == fs.ErrorObjectNotFound {
             // File doesn't exist so return old f
@@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 // ErrorIsDir if possible without doing any extra work,
 // otherwise ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-    return f.newObjectWithLink(ctx, remote, nil)
+    return f.newObject(ctx, remote)
 }
 
 func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
@@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
     return link, nil
 }
 
-// readMetaDataForRemote reads the metadata from the remote
-func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
-    link, err := f.getObjectLink(ctx, remote)
-    if err != nil {
-        return nil, nil, err
-    }
-
+// readMetaDataForLink reads the metadata from the remote
+func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
     var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
     var err error
     if err = f.pacer.Call(func() (bool, error) {
         fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
         return shouldRetry(ctx, err)
     }); err != nil {
-        return nil, nil, err
+        return nil, err
     }
 
-    return link, fileSystemAttrs, nil
+    return fileSystemAttrs, nil
 }
 
-// readMetaData gets the metadata if it hasn't already been fetched
+// Return an Object from a path and link
 //
-// it also sets the info
-func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
-    if o.link != nil {
-        return nil
-    }
-    link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
-    if err != nil {
-        return err
+// If it can't be found it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
+    o := &Object{
+        fs:     f,
+        remote: remote,
     }
 
     o.id = link.LinkID

@@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
     o.mimetype = link.MIMEType
     o.link = link
 
+    fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
+    if err != nil {
+        return nil, err
+    }
     if fileSystemAttrs != nil {
         o.modTime = fileSystemAttrs.ModificationTime
         o.originalSize = &fileSystemAttrs.Size

@@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
         o.digests = &fileSystemAttrs.Digests
     }
 
-    return nil
+    return o, nil
 }
 
-// Return an Object from a path
+// Return an Object from a path only
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
-    o := &Object{
-        fs:     f,
-        remote: remote,
-    }
-
-    err := o.readMetaData(ctx, link)
+func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
+    link, err := f.getObjectLink(ctx, remote)
     if err != nil {
         return nil, err
     }
-    return o, nil
+    return f.newObjectWithLink(ctx, remote, link)
 }
 
 // List the objects and directories in dir into entries. The
@@ -2606,6 +2606,35 @@ knows about - please make a bug report if not.
 `,
     Default:  fs.Tristate{},
     Advanced: true,
+}, {
+    Name: "directory_bucket",
+    Help: strings.ReplaceAll(`Set to use AWS Directory Buckets
+
+If you are using an AWS Directory Bucket then set this flag.
+
+This will ensure no |Content-Md5| headers are sent and ensure |ETag|
+headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
+be set on all objects whether single or multipart uploaded.
+
+This also sets |no_check_bucket = true|.
+
+Note that Directory Buckets do not support:
+
+- Versioning
+- |Content-Encoding: gzip|
+
+Rclone limitations with Directory Buckets:
+
+- rclone does not support creating Directory Buckets with |rclone mkdir|
+- ... or removing them with |rclone rmdir| yet
+- Directory Buckets do not appear when doing |rclone lsf| at the top level.
+- Rclone can't remove auto created directories yet. In theory this should
+  work with |directory_markers = true| but it doesn't.
+- Directories don't seem to appear in recursive (ListR) listings.
+`, "|", "`"),
+    Default:  false,
+    Advanced: true,
+    Provider: "AWS",
 }, {
     Name: "sdk_log_mode",
     Help: strings.ReplaceAll(`Set to debug the SDK
@@ -2780,6 +2809,7 @@ type Options struct {
     UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
     UseUnsignedPayload  fs.Tristate `config:"use_unsigned_payload"`
     SDKLogMode          sdkLogMode  `config:"sdk_log_mode"`
+    DirectoryBucket     bool        `config:"directory_bucket"`
 }
 
 // Fs represents a remote s3 server
@@ -3052,9 +3082,16 @@ func (s3logger) Logf(classification logging.Classification, format string, v ...
 func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
     ci := fs.GetConfig(ctx)
     var awsConfig aws.Config
+    // Make the default static auth
+    v := aws.Credentials{
+        AccessKeyID:     opt.AccessKeyID,
+        SecretAccessKey: opt.SecretAccessKey,
+        SessionToken:    opt.SessionToken,
+    }
+    awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
 
     // Try to fill in the config from the environment if env_auth=true
-    if opt.EnvAuth {
+    if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
         configOpts := []func(*awsconfig.LoadOptions) error{}
         // Set the name of the profile if supplied
         if opt.Profile != "" {
@@ -3079,13 +3116,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
         case opt.SecretAccessKey == "":
             return nil, errors.New("secret_access_key not found")
         default:
-            // Make the static auth
-            v := aws.Credentials{
-                AccessKeyID:     opt.AccessKeyID,
-                SecretAccessKey: opt.SecretAccessKey,
-                SessionToken:    opt.SessionToken,
-            }
-            awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
+            // static credentials are already set
         }
     }
 
@@ -3547,6 +3578,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         // MD5 digest of their object data.
         f.etagIsNotMD5 = true
     }
+    if opt.DirectoryBucket {
+        // Objects uploaded to directory buckets appear to have random ETags
+        //
+        // This doesn't appear to be documented
+        f.etagIsNotMD5 = true
+        // The normal API doesn't work for creating directory buckets, so don't try
+        f.opt.NoCheckBucket = true
+    }
     f.setRoot(root)
     f.features = (&fs.Features{
         ReadMimeType: true,
@@ -6028,6 +6067,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
         SSECustomerKey:    w.multiPartUploadInput.SSECustomerKey,
         SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
     }
+    if w.f.opt.DirectoryBucket {
+        // Directory buckets do not support "Content-Md5" header
+        uploadPartReq.ContentMD5 = nil
+    }
     var uout *s3.UploadPartOutput
     err = w.f.pacer.Call(func() (bool, error) {
         // rewind the reader on retry and after reading md5
@@ -6304,7 +6347,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
     if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
         // Set the md5sum as metadata on the object if
         // - a multipart upload
-        // - the Etag is not an MD5, eg when using SSE/SSE-C
+        // - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
         // provided checksums aren't disabled
         ui.req.Metadata[metaMD5Hash] = md5sumBase64
     }
@ -6319,7 +6362,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
|||
if size >= 0 {
|
||||
ui.req.ContentLength = &size
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
|
|
|
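Taken together, the hunks above amount to a small checksum policy for directory buckets: never send the `Content-Md5` header, and fall back to storing the MD5 as object metadata. A standalone sketch of that policy (not rclone source; names are illustrative):

    package main

    import "fmt"

    // checksumPolicy condenses the rules from the hunks above: directory
    // buckets reject the Content-Md5 header, and their ETags are not MD5
    // sums, so the MD5 goes into object metadata instead (unless
    // checksums are disabled).
    func checksumPolicy(directoryBucket, multipart, etagIsNotMD5, disableChecksum bool) (sendContentMD5, storeMD5Metadata bool) {
        sendContentMD5 = !directoryBucket
        storeMD5Metadata = (multipart || etagIsNotMD5) && !disableChecksum
        return sendContentMD5, storeMD5Metadata
    }

    func main() {
        send, store := checksumPolicy(true, false, true, false)
        fmt.Println(send, store) // false true: metadata only, no header
    }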
@@ -601,9 +601,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
 	}

 	fi, err := cn.smbShare.Stat(reqDir)
-	if err == nil {
-		o.statResult = fi
+	if err != nil {
+		return fmt.Errorf("SetModTime: stat: %w", err)
 	}
+	o.statResult = fi
 	return err
 }

@@ -685,7 +686,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 	defer func() {
-		o.statResult, _ = cn.smbShare.Stat(filename)
 		o.fs.putConnection(&cn)
 	}()

@@ -723,7 +723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update Close failed: %w", err)
 	}

-	// Set the modified time
+	// Set the modified time and also o.statResult
 	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return fmt.Errorf("Update SetModTime failed: %w", err)
@@ -14,21 +14,30 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"time"

+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 )

-func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
 	// Not found. Can be returned by NextCloud when merging chunks of an upload.
 	if resp != nil && resp.StatusCode == 404 {
+		if *wasLocked {
+			// Assume a 404 error after we've received a 423 error is actually a success
+			return false, nil
+		}
 		return true, err
 	}

 	// 423 LOCKED
 	if resp != nil && resp.StatusCode == 423 {
-		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
+		*wasLocked = true
+		fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
+		time.Sleep(*sleepTime)
+		*sleepTime *= 2
+		return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
 	}

 	return f.shouldRetry(ctx, resp, err)

@@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
 	}
 	opts.ExtraHeaders = o.extraHeaders(ctx, src)
 	opts.ExtraHeaders["Destination"] = destinationURL.String()
+	sleepTime := 5 * time.Second
+	wasLocked := false
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
+		return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
 	})
 	if err != nil {
 		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
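The retry pattern introduced here — exponential backoff on 423, and treating a later 404 as proof the merge finished — is generic enough to sketch on its own. A minimal, self-contained illustration (not rclone source; the simulated status codes are assumptions for demonstration):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryMerge sketches the 423/404 heuristic above: 423 means the
    // merge is still running (wait, back off, retry); a 404 after a 423
    // means the merge finished and the upload directory is gone, i.e.
    // success.
    func retryMerge(do func() int) error {
        sleep := 5 * time.Millisecond // rclone starts at 5s; shortened for the demo
        wasLocked := false
        for attempt := 0; attempt < 10; attempt++ {
            switch status := do(); status {
            case 200:
                return nil
            case 423:
                wasLocked = true
                time.Sleep(sleep)
                sleep *= 2 // exponential backoff
            case 404:
                if wasLocked {
                    return nil // merge completed while we were waiting
                }
                return errors.New("upload directory not found")
            default:
                return fmt.Errorf("unexpected status %d", status)
            }
        }
        return errors.New("merge still locked after retries")
    }

    func main() {
        responses := []int{423, 423, 404} // simulated server behaviour
        i := 0
        err := retryMerge(func() int { s := responses[i]; i++; return s })
        fmt.Println(err) // <nil>: 404 after 423 is treated as success
    }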
@@ -27,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
 	return nil
 }

-// User is a Zoho user we are only interested in the ZUID here
-type User struct {
+// OAuthUser is a Zoho user we are only interested in the ZUID here
+type OAuthUser struct {
 	FirstName string `json:"First_Name"`
 	Email     string `json:"Email"`
 	LastName  string `json:"Last_Name"`

@@ -36,12 +36,41 @@ type User struct {
 	ZUID int64 `json:"ZUID"`
 }

-// TeamWorkspace represents a Zoho Team or workspace
+// UserInfoResponse is returned by the user info API.
+type UserInfoResponse struct {
+	Data struct {
+		ID         string `json:"id"`
+		Type       string `json:"users"`
+		Attributes struct {
+			EmailID string `json:"email_id"`
+			Edition string `json:"edition"`
+		} `json:"attributes"`
+	} `json:"data"`
+}
+
+// PrivateSpaceInfo gives basic information about a user's private folder.
+type PrivateSpaceInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	} `json:"data"`
+}
+
+// CurrentTeamInfo gives information about the current user in a team.
+type CurrentTeamInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	}
+}
+
+// TeamWorkspace represents a Zoho Team, Workspace or Private Space
 // It's actually a VERY large json object that differs between
-// Team and Workspace but we are only interested in some fields
-// that both of them have so we can use the same struct for both
+// Team and Workspace and Private Space but we are only interested in some fields
+// that all of them have so we can use the same struct.
 type TeamWorkspace struct {
 	ID         string `json:"id"`
 	Type       string `json:"type"`
 	Attributes struct {
 		Name    string `json:"name"`
 		Created Time   `json:"created_time_in_millisecond"`

@@ -49,7 +78,8 @@ type TeamWorkspace struct {
 	} `json:"attributes"`
 }

-// TeamWorkspaceResponse is the response by the list teams api
+// TeamWorkspaceResponse is the response by the list teams API, list workspace API
+// or list team private spaces API.
 type TeamWorkspaceResponse struct {
 	TeamWorkspace []TeamWorkspace `json:"data"`
 }

@@ -180,11 +210,38 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
 	return &ufi, nil
 }

+// LargeUploadInfo is once again a slightly different version of UploadInfo
+// returned as part of a LargeUploadResponse by the large file upload API.
+type LargeUploadInfo struct {
+	Attributes struct {
+		ParentID    string `json:"parent_id"`
+		FileName    string `json:"file_name"`
+		RessourceID string `json:"resource_id"`
+		FileInfo    string `json:"file_info"`
+	} `json:"attributes"`
+}
+
+// GetUploadFileInfo decodes the embedded FileInfo
+func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
+	var ufi UploadFileInfo
+	err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
+	}
+	return &ufi, nil
+}
+
 // UploadResponse is the response to a file Upload
 type UploadResponse struct {
 	Uploads []UploadInfo `json:"data"`
 }

+// LargeUploadResponse is the response returned by large file upload API.
+type LargeUploadResponse struct {
+	Uploads []LargeUploadInfo `json:"data"`
+	Status  string            `json:"status"`
+}
+
 // WriteMetadataRequest is used to write metadata for a
 // single item
 type WriteMetadataRequest struct {
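Zoho's upload APIs embed the file metadata as a JSON document inside a string field (`file_info`), so it has to be unmarshalled twice — once for the envelope and once for the embedded string. A standalone sketch of that pattern (field names here are illustrative, not the exact Zoho schema):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type envelope struct {
        Attributes struct {
            FileInfo string `json:"file_info"` // JSON encoded as a string
        } `json:"attributes"`
    }

    type fileInfo struct {
        Size int64 `json:"size"` // illustrative field
    }

    func main() {
        raw := `{"attributes":{"file_info":"{\"size\":42}"}}`
        var env envelope
        if err := json.Unmarshal([]byte(raw), &env); err != nil {
            panic(err)
        }
        var fi fileInfo // second decode: the string payload
        if err := json.Unmarshal([]byte(env.Attributes.FileInfo), &fi); err != nil {
            panic(err)
        }
        fmt.Println(fi.Size) // 42
    }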
@@ -14,6 +14,7 @@ import (
 	"strings"
 	"time"

+	"github.com/google/uuid"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"

@@ -36,9 +37,11 @@ const (
 	rcloneClientID              = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
 	rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
 	minSleep                    = 10 * time.Millisecond
-	maxSleep                    = 2 * time.Second
+	maxSleep                    = 60 * time.Second
 	decayConstant               = 2 // bigger for slower decay, exponential
 	configRootID                = "root_folder_id"

+	defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
 )

 // Globals

@@ -50,6 +53,7 @@ var (
 		"WorkDrive.team.READ",
 		"WorkDrive.workspace.READ",
 		"WorkDrive.files.ALL",
+		"ZohoFiles.files.ALL",
 	},
 	Endpoint: oauth2.Endpoint{
 		AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",

@@ -61,6 +65,8 @@ var (
 		RedirectURL: oauthutil.RedirectLocalhostURL,
 	}
 	rootURL     = "https://workdrive.zoho.eu/api/v1"
+	downloadURL = "https://download.zoho.eu/v1/workdrive"
+	uploadURL   = "https://upload.zoho.eu/workdrive-api/v1/"
 	accountsURL = "https://accounts.zoho.eu"
 )

@@ -79,7 +85,7 @@ func init() {
 	getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
 		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
-			return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
+			return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
 		}
 		authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
 		apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)

@@ -88,12 +94,12 @@ func init() {

 	switch config.State {
 	case "":
-		return oauthutil.ConfigOut("teams", &oauthutil.Options{
+		return oauthutil.ConfigOut("type", &oauthutil.Options{
 			OAuth2Config: oauthConfig,
 			// No refresh token unless ApprovalForce is set
 			OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
 		})
-	case "teams":
+	case "type":
 		// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
 		// its own custom type
 		token, err := oauthutil.GetToken(name, m)

@@ -108,24 +114,43 @@ func init() {
 			}
 		}

-		authSrv, apiSrv, err := getSrvs()
+		_, apiSrv, err := getSrvs()
 		if err != nil {
 			return nil, err
 		}

-		// Get the user Info
-		opts := rest.Opts{
-			Method: "GET",
-			Path:   "/oauth/user/info",
+		userInfo, err := getUserInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
 		}
-		var user api.User
-		_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
+		// If personal Edition only one private Space is available. Directly configure that.
+		if userInfo.Data.Attributes.Edition == "PERSONAL" {
+			return fs.ConfigResult("private_space", userInfo.Data.ID)
+		}
+		// Otherwise go to team selection
+		return fs.ConfigResult("team", userInfo.Data.ID)
+	case "private_space":
+		_, apiSrv, err := getSrvs()
+		if err != nil {
+			return nil, err
+		}
+
+		workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
+			workspace := workspaces[i]
+			return workspace.ID, workspace.Name
+		})
+	case "team":
		_, apiSrv, err := getSrvs()
 		if err != nil {
 			return nil, err
 		}

 		// Get the teams
-		teams, err := listTeams(ctx, user.ZUID, apiSrv)
+		teams, err := listTeams(ctx, config.Result, apiSrv)
 		if err != nil {
 			return nil, err
 		}

@@ -143,9 +168,19 @@ func init() {
 		if err != nil {
 			return nil, err
 		}
+		currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		workspaces = append(workspaces, privateSpaces...)

 		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
 			workspace := workspaces[i]
-			return workspace.ID, workspace.Attributes.Name
+			return workspace.ID, workspace.Name
 		})
 	case "workspace_end":
 		workspaceID := config.Result
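The reworked config flow is easiest to read as a small state machine. A sketch of the states and transitions (labels match the `config.State` strings above; this is an illustration, not rclone source):

    package main

    import "fmt"

    // nextState sketches the Zoho config state machine above:
    //   ""     -> "type"          (OAuth done, inspect the account)
    //   "type" -> "private_space" (PERSONAL edition: only My Space)
    //   "type" -> "team"          (team edition: pick a team)
    //   "private_space"/"team" -> "workspace_end" (pick a workspace)
    func nextState(state, edition string) string {
        switch state {
        case "":
            return "type"
        case "type":
            if edition == "PERSONAL" {
                return "private_space"
            }
            return "team"
        case "private_space", "team":
            return "workspace_end"
        }
        return "done"
    }

    func main() {
        state := ""
        for state != "done" {
            state = nextState(state, "PERSONAL")
            fmt.Println(state) // type, private_space, workspace_end, done
        }
    }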
@@ -179,7 +214,13 @@ browser.`,
 		}, {
 			Value: "com.au",
 			Help:  "Australia",
-		}}}, {
+		}},
+	}, {
+		Name:     "upload_cutoff",
+		Help:     "Cutoff for switching to large file upload api (>= 10 MiB).",
+		Default:  fs.SizeSuffix(defaultUploadCutoff),
+		Advanced: true,
 	}, {
 		Name:     config.ConfigEncoding,
 		Help:     config.ConfigEncodingHelp,
 		Advanced: true,

@@ -193,6 +234,7 @@ browser.`,

 // Options defines the configuration for this backend
 type Options struct {
+	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
 	RootFolderID string               `config:"root_folder_id"`
 	Region       string               `config:"region"`
 	Enc          encoder.MultiEncoder `config:"encoding"`

@@ -200,13 +242,15 @@ type Options struct {

 // Fs represents a remote workdrive
 type Fs struct {
-	name     string             // name of this remote
-	root     string             // the path we are working on
-	opt      Options            // parsed options
-	features *fs.Features       // optional features
-	srv      *rest.Client       // the connection to the server
-	dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer    *fs.Pacer          // pacer for API calls
+	name        string             // name of this remote
+	root        string             // the path we are working on
+	opt         Options            // parsed options
+	features    *fs.Features       // optional features
+	srv         *rest.Client       // the connection to the server
+	downloadsrv *rest.Client       // the connection to the download server
+	uploadsrv   *rest.Client       // the connection to the upload server
+	dirCache    *dircache.DirCache // Map of directory path to directory id
+	pacer       *fs.Pacer          // pacer for API calls
 }

 // Object describes a Zoho WorkDrive object

@@ -229,6 +273,8 @@ func setupRegion(m configmap.Mapper) error {
 		return errors.New("no region set")
 	}
 	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
+	downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
+	uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
 	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
 	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
 	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)

@@ -237,11 +283,63 @@ func setupRegion(m configmap.Mapper) error {

 // ------------------------------------------------------------

-func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
+type workspaceInfo struct {
+	ID   string
+	Name string
+}
+
+func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
+	var userInfo api.UserInfoResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/me",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &userInfo, nil
+}
+
+func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
+	var currentTeamInfo api.CurrentTeamInfo
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/teams/" + teamID + "/currentuser",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &currentTeamInfo, err
+}
+
+func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var privateSpaceListResponse api.TeamWorkspaceResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/" + teamUserID + "/privatespace",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
+	for _, workspace := range privateSpaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
+	}
+	return workspaceList, err
+}
+
+func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
 	var teamList api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
-		Path:         "/users/" + strconv.FormatInt(uid, 10) + "/teams",
+		Path:         "/users/" + zuid + "/teams",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
 	_, err := srv.CallJSON(ctx, &opts, nil, &teamList)

@@ -251,18 +349,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWork
 	return teamList.TeamWorkspace, nil
 }

-func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
-	var workspaceList api.TeamWorkspaceResponse
+func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var workspaceListResponse api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
 		Path:         "/teams/" + teamID + "/workspaces",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
-	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
+	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
 	if err != nil {
 		return nil, err
 	}
-	return workspaceList.TeamWorkspace, nil
+
+	workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
+	for _, workspace := range workspaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
+	}
+
+	return workspaceList, nil
 }

 // --------------------------------------------------------------

@@ -285,13 +389,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 	}
 	authRetry := false

+	// Bail out early if we are missing OAuth Scopes.
+	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
+		fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
+		return false, err
+	}
+
 	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
 	if resp != nil && resp.StatusCode == 429 {
-		fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
-		time.Sleep(60 * time.Second)
+		err = pacer.RetryAfterError(err, 60*time.Second)
+		fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
 		return true, err
 	}
 	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
@@ -389,6 +500,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err := configstruct.Set(m, opt); err != nil {
 		return nil, err
 	}
+
+	if opt.UploadCutoff < defaultUploadCutoff {
+		return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
+	}
+
 	err := setupRegion(m)
 	if err != nil {
 		return nil, err

@@ -401,11 +517,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}

 	f := &Fs{
-		name:  name,
-		root:  root,
-		opt:   *opt,
-		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
-		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		name:        name,
+		root:        root,
+		opt:         *opt,
+		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
+		downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
+		uploadsrv:   rest.NewClient(oAuthClient).SetRoot(uploadURL),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,

@@ -643,9 +761,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
 	return
 }

+func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
+	opts := rest.Opts{
+		Method:        "POST",
+		Path:          "/stream/upload",
+		Body:          in,
+		ContentLength: &size,
+		ContentType:   "application/octet-stream",
+		Options:       options,
+		ExtraHeaders: map[string]string{
+			"x-filename":          url.QueryEscape(name),
+			"x-parent_id":         parent,
+			"override-name-exist": "true",
+			"upload-id":           uuid.New().String(),
+			"x-streammode":        "1",
+		},
+	}
+
+	var err error
+	var resp *http.Response
+	var uploadResponse *api.LargeUploadResponse
+	err = f.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("upload large error: %v", err)
+	}
+	if len(uploadResponse.Uploads) != 1 {
+		return nil, errors.New("upload: invalid response")
+	}
+	upload := uploadResponse.Uploads[0]
+	uploadInfo, err := upload.GetUploadFileInfo()
+	if err != nil {
+		return nil, fmt.Errorf("upload error: %w", err)
+	}
+
+	// Fill in the api.Item from the api.UploadFileInfo
+	var info api.Item
+	info.ID = upload.Attributes.RessourceID
+	info.Attributes.Name = upload.Attributes.FileName
+	// info.Attributes.Type = not used
+	info.Attributes.IsFolder = false
+	// info.Attributes.CreatedTime = not used
+	info.Attributes.ModifiedTime = uploadInfo.GetModTime()
+	// info.Attributes.UploadedTime = 0 not used
+	info.Attributes.StorageInfo.Size = uploadInfo.Size
+	info.Attributes.StorageInfo.FileCount = 0
+	info.Attributes.StorageInfo.FolderCount = 0
+
+	return &info, nil
+}
+
 func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
 	params := url.Values{}
-	params.Set("filename", name)
+	params.Set("filename", url.QueryEscape(name))
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
 	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
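Both upload paths now pass the file name through `url.QueryEscape` before putting it into a query parameter or header, so names with reserved characters survive transport. A quick illustration:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Spaces, '&' or '#' would corrupt a query string or header
        // value if sent raw; QueryEscape makes them safe to embed.
        name := "report & summary #1.pdf"
        fmt.Println(url.QueryEscape(name))
        // Output: report+%26+summary+%231.pdf
    }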
@@ -705,21 +875,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	size := src.Size()
-	remote := src.Remote()
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	switch err {
+	case nil:
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		size := src.Size()
+		remote := src.Remote()

-	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
-	if err != nil {
+		// Create the directory for the object if it doesn't exist
+		leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+		if err != nil {
+			return nil, err
+		}
+
+		// use normal upload API for small sizes (<10MiB)
+		if size < int64(f.opt.UploadCutoff) {
+			info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+			if err != nil {
+				return nil, err
+			}
+
+			return f.newObjectWithInfo(ctx, remote, info)
+		}
+
+		// large file API otherwise
+		info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return nil, err
+		}
+
+		return f.newObjectWithInfo(ctx, remote, info)
+	default:
 		return nil, err
 	}
-
-	// Upload the file
-	info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
-	if err != nil {
-		return nil, err
-	}
-	return f.newObjectWithInfo(ctx, remote, info)
 }

 // Mkdir creates the container if it doesn't exist
@@ -1159,7 +1348,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.downloadsrv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {

@@ -1183,11 +1372,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}

-	// Overwrite the old file
-	info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+	// use normal upload API for small sizes (<10MiB)
+	if size < int64(o.fs.opt.UploadCutoff) {
+		info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return err
+		}
+
+		return o.setMetaData(info)
+	}
+
+	// large file API otherwise
+	info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
 	if err != nil {
 		return err
 	}

 	return o.setMetaData(info)
 }
@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestZoho:",
-		NilObject:  (*zoho.Object)(nil),
+		RemoteName:      "TestZoho:",
+		SkipInvalidUTF8: true,
+		NilObject:       (*zoho.Object)(nil),
 	})
 }
@@ -21,12 +21,12 @@ def find_backends():
 def output_docs(backend, out, cwd):
     """Output documentation for backend options to out"""
     out.flush()
-    subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
+    subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)

 def output_backend_tool_docs(backend, out, cwd):
     """Output documentation for backend tool to out"""
     out.flush()
-    subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
+    subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)

 def alter_doc(backend):
     """Alter the documentation for backend"""
@@ -20,6 +20,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"

 	"github.com/spf13/cobra"

@@ -193,7 +194,7 @@ var commandDefinition = &cobra.Command{
 	cmd.Run(false, true, command, func() error {
 		err := Bisync(ctx, fs1, fs2, &opt)
 		if err == ErrBisyncAborted {
-			os.Exit(2)
+			return fserrors.FatalError(err)
 		}
 		return err
 	})
@@ -10,6 +10,7 @@ import (
 	"io"
 	"os"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"

@@ -21,7 +22,6 @@ import (
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"golang.org/x/exp/slices"
 )

 // ListingHeader defines first line of a listing

@@ -23,7 +23,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )

-// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
+// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code
 var ErrBisyncAborted = errors.New("bisync aborted")

 // bisyncRun keeps bisync runtime state

cmd/cmd.go

@@ -50,7 +50,6 @@ var (
 	version bool
 	// Errors
 	errorCommandNotFound    = errors.New("command not found")
-	errorUncategorized      = errors.New("uncategorized error")
 	errorNotEnoughArguments = errors.New("not enough arguments")
 	errorTooManyArguments   = errors.New("too many arguments")
 )

@@ -495,8 +494,6 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.DirNotFound)
 	case errors.Is(err, fs.ErrorObjectNotFound):
 		os.Exit(exitcode.FileNotFound)
-	case errors.Is(err, errorUncategorized):
-		os.Exit(exitcode.UncategorizedError)
 	case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
 		os.Exit(exitcode.TransferExceeded)
 	case errors.Is(err, fssync.ErrorMaxDurationReached):

@@ -507,8 +504,10 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.NoRetryError)
 	case fserrors.IsFatalError(err):
 		os.Exit(exitcode.FatalError)
-	default:
+	case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments):
 		os.Exit(exitcode.UsageError)
+	default:
+		os.Exit(exitcode.UncategorizedError)
 	}
 }

@@ -536,6 +535,7 @@ func Main() {
 		if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
 			Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
 		}
-		fs.Fatalf(nil, "Fatal error: %v", err)
+		fs.Logf(nil, "Fatal error: %v", err)
+		os.Exit(exitcode.UsageError)
 	}
 }
@@ -42,7 +42,9 @@ When running in background mode the user will have to stop the mount manually:

     # Linux
     fusermount -u /path/to/local/mount
-    # OS X
+    #... or on some systems
+    fusermount3 -u /path/to/local/mount
+    # OS X or Linux when using nfsmount
     umount /path/to/local/mount

 The umount operation can fail, for example when the mountpoint is busy.

@@ -386,9 +388,9 @@ Note that systemd runs mount units without any environment variables including
 `PATH` or `HOME`. This means that tilde (`~`) expansion will not work
 and you should provide `--config` and `--cache-dir` explicitly as absolute
 paths via rclone arguments.
-Since mounting requires the `fusermount` program, rclone will use the fallback
-PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
-is present on this PATH.
+Since mounting requires the `fusermount` or `fusermount3` program,
+rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
+Please ensure that `fusermount`/`fusermount3` is present on this PATH.
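For instance, such a mount can be declared in `/etc/fstab` with absolute `config` and `cache_dir` paths so that systemd needs nothing from the environment; the remote name and paths below are placeholders, not taken from this change:

    remote:path /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0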

 ### Rclone as Unix mount helper
|
|
@ -251,6 +251,15 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
|
|||
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
|
||||
case "vfs-used-is-size":
|
||||
vfsOpt.UsedIsSize, err = opt.GetBool(key)
|
||||
case "vfs-read-chunk-streams":
|
||||
intVal, err = opt.GetInt64(key)
|
||||
if err == nil {
|
||||
if intVal >= 0 && intVal <= math.MaxInt {
|
||||
vfsOpt.ChunkStreams = int(intVal)
|
||||
} else {
|
||||
err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
|
||||
}
|
||||
}
|
||||
|
||||
// unprefixed vfs options
|
||||
case "no-modtime":
|
@@ -174,7 +174,7 @@ func TestCmdTest(t *testing.T) {
 	// Test error and error output
 	out, err = rclone("version", "--provoke-an-error")
 	if assert.Error(t, err) {
-		assert.Contains(t, err.Error(), "exit status 1")
+		assert.Contains(t, err.Error(), "exit status 2")
 		assert.Contains(t, out, "Error: unknown flag")
 	}

@@ -6,7 +6,9 @@ package cmdtest

 import (
 	"os"
+	"regexp"
 	"runtime"
 	"strings"
 	"testing"

 	"github.com/stretchr/testify/assert"

@@ -344,4 +346,42 @@ func TestEnvironmentVariables(t *testing.T) {
 	env = ""
 	out, err = rcloneEnv(env, "version", "-vv", "--use-json-log")
 	jsonLogOK()
+
+	// Find all the File filter lines in out and return them
+	parseFileFilters := func(out string) (extensions []string) {
+		// Match: - (^|/)[^/]*\.jpg$
+		find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`)
+		for _, line := range strings.Split(out, "\n") {
+			if m := find.FindStringSubmatch(line); m != nil {
+				extensions = append(extensions, m[1])
+			}
+		}
+		return extensions
+	}
+
+	// Make sure that multiple valued (stringArray) environment variables are handled properly
+	env = ``
+	out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
+	require.NoError(t, err)
+	assert.Equal(t, []string{"gif", "tif"}, parseFileFilters(out))
+
+	env = `RCLONE_EXCLUDE=*.jpg`
+	out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif")
+	require.NoError(t, err)
+	assert.Equal(t, []string{"jpg", "gif"}, parseFileFilters(out))
+
+	env = `RCLONE_EXCLUDE=*.jpg,*.png`
+	out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
+	require.NoError(t, err)
+	assert.Equal(t, []string{"jpg", "png", "gif", "tif"}, parseFileFilters(out))
+
+	env = `RCLONE_EXCLUDE="*.jpg","*.png"`
+	out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
+	require.NoError(t, err)
+	assert.Equal(t, []string{"jpg", "png"}, parseFileFilters(out))
+
+	env = `RCLONE_EXCLUDE="*.,,,","*.png"`
+	out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
+	require.NoError(t, err)
+	assert.Equal(t, []string{",,,", "png"}, parseFileFilters(out))
 }
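These tests exercise the new CSV treatment of `stringArray` environment variables. The decoding itself is just Go's `encoding/csv` applied to a single line, which can be sketched standalone:

    package main

    import (
        "encoding/csv"
        "fmt"
        "strings"
    )

    func main() {
        // Each value mirrors a RCLONE_EXCLUDE setting from the tests above.
        for _, in := range []string{`*.jpg`, `*.jpg,*.png`, `"*.,,,","*.png"`} {
            r := csv.NewReader(strings.NewReader(in))
            record, err := r.Read() // one CSV record = one option list
            if err != nil {
                panic(err)
            }
            fmt.Printf("%s -> %q\n", in, record)
        }
    }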
@@ -889,3 +889,8 @@ put them back in again.` >}}
 * Mathieu Moreau <mrx23dot@users.noreply.github.com>
 * fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
 * Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
+* Divyam <47589864+divyam234@users.noreply.github.com>
+* ttionya <ttionya@users.noreply.github.com>
+* quiescens <quiescens@gmail.com>
+* rishi.sridhar <rishi.sridhar@zohocorp.com>
+* Lawrence Murray <lawrence@indii.org>
@@ -180,6 +180,13 @@ If the resource has multiple user-assigned identities you will need to
 unset `env_auth` and set `use_msi` instead. See the [`use_msi`
 section](#use_msi).

+If you are operating in disconnected clouds, or private clouds such as
+Azure Stack you may want to set `disable_instance_discovery = true`.
+This determines whether rclone requests Microsoft Entra instance
+metadata from `https://login.microsoft.com/` before authenticating.
+Setting this to `true` will skip this request, making you responsible
+for ensuring the configured authority is valid and trustworthy.
+
 ##### Env Auth: 3. Azure CLI credentials (as used by the az tool)

 Credentials created with the `az` tool can be picked up using `env_auth`.

@@ -290,6 +297,16 @@ be explicitly specified using exactly one of the `msi_object_id`,

 If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
 set, this is equivalent to using `env_auth`.
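As an illustration, a remote for such a disconnected or private cloud might combine the two options like this (remote name and values are placeholders, not taken from the rclone docs):

    [azstack]
    type = azureblob
    env_auth = true
    disable_instance_discovery = true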
+#### Azure CLI tool `az` {#use_az}
+
+Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
+as the sole means of authentication.
+
+Setting this can be useful if you wish to use the `az` CLI on a host with
+a System Managed Identity that you do not want to use.
+
+Don't set `env_auth` at the same time.
+
 #### Anonymous {#anonymous}

 If you want to access resources with public anonymous access then set
@@ -968,12 +968,15 @@ that while concurrent bisync runs are allowed, _be very cautious_
 that there is no overlap in the trees being synched between concurrent runs,
 lest there be replicated files, deleted files and general mayhem.

-### Return codes
+### Exit codes

 `rclone bisync` returns the following codes to the calling program:
 - `0` on a successful run,
 - `1` for a non-critical failing run (a rerun may be successful),
-- `2` for a critically aborted run (requires a `--resync` to recover).
+- `2` on syntax or usage error,
+- `7` for a critically aborted run (requires a `--resync` to recover).

 See also the section about [exit codes](/docs/#exit-code) in main docs.

 ### Graceful Shutdown
@@ -5,6 +5,25 @@ description: "Rclone Changelog"

 # Changelog

+## v1.68.1 - 2024-09-24
+
+[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
+
+* Bug Fixes
+    * build: Fix docker release build (ttionya)
+    * doc fixes (Nick Craig-Wood, Pawel Palucha)
+    * fs
+        * Fix `--dump filters` not always appearing (Nick Craig-Wood)
+        * Fix setting `stringArray` config values from environment variables (Nick Craig-Wood)
+    * rc: Fix default value of `--metrics-addr` (Nick Craig-Wood)
+    * serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam)
+* Onedrive
+    * Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)
+* Pikpak
+    * Fix login issue where token retrieval fails (wiserain)
+* S3
+    * Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood)
+
 ## v1.68.0 - 2024-09-08

 [See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0)
@@ -2868,9 +2868,9 @@ messages may not be valid after the retry. If rclone has done a retry
 it will log a high priority message if the retry was successful.

 ### List of exit codes ###
-* `0` - success
-* `1` - Syntax or usage error
-* `2` - Error not otherwise categorised
+* `0` - Success
+* `1` - Error not otherwise categorised
+* `2` - Syntax or usage error
 * `3` - Directory not found
 * `4` - File not found
 * `5` - Temporary error (one that more retries might fix) (Retry errors)
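Scripts that branch on these codes can use an ordinary shell `case`; a minimal sketch (remote paths are placeholders):

    rclone copy remote:src remote:dst
    case $? in
        0) echo "success" ;;
        2) echo "usage error" ;;
        3) echo "directory not found" ;;
        4) echo "file not found" ;;
        *) echo "other failure" ;;
    esac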
@@ -2911,6 +2911,22 @@ so they take exactly the same form.

 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

+Options that can appear multiple times (type `stringArray`) are
+treated slightly differently as environment variables can only be
+defined once. In order to allow a simple mechanism for adding one or
+many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
+string. For example
+
+| Environment Variable | Equivalent options |
+|----------------------|--------------------|
+| `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
+| `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
+| `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
+| `RCLONE_EXCLUDE='"/directory with comma , in it /**"'` | `--exclude "/directory with comma , in it /**"` |
+
+If `stringArray` options are defined as environment variables **and**
+options on the command line then all the values will be used.
+
 ### Config file ###

 You can set defaults for values in the config file on an individual
@@ -502,12 +502,18 @@ is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115)

 **The current google API does not allow photos to be downloaded at original resolution. This is very important if you are, for example, relying on "Google Photos" as a backup of your photos. You will not be able to use rclone to redownload original images. You could use 'google takeout' to recover the original photos as a last resort**

+**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
+headless browser to download images in full resolution.
+
 ### Downloading Videos

 When videos are downloaded they are downloaded in a really compressed
 version of the video compared to downloading it via the Google Photos
 web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).

+**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
+headless browser to download images in full resolution.
+
 ### Duplicates

 If a file name is duplicated in a directory then rclone will add the
@@ -521,7 +521,7 @@ upon backend-specific capabilities.
 | Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
 | Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
 | Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
-| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
+| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
 | OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
 | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
 | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
@@ -401,6 +401,38 @@ there for more details.

 Setting this flag increases the chance for undetected upload failures.

+### Increasing performance
+
+#### Using server-side copy
+
+If you are copying objects between S3 buckets in the same region, you should
+use server-side copy.
+This is much faster than downloading and re-uploading the objects, as no data is transferred.
+
+For rclone to use server-side copy, you must use the same remote for the source and destination.
+
+    rclone copy s3:source-bucket s3:destination-bucket
+
+When using server-side copy, the performance is limited by the rate at which rclone issues
+API requests to S3.
+See below for how to increase the number of API requests rclone makes.
+
+#### Increasing the rate of API requests
+
+You can increase the rate of API requests to S3 by increasing the parallelism using the
+`--transfers` and `--checkers` options.
+
+Rclone uses very conservative defaults for these settings, as not all providers support
+high rates of requests. Depending on your provider, you can significantly increase the
+number of transfers and checkers.
+
+For example, with AWS S3, you can increase the number of checkers to values like 200.
+If you are doing a server-side copy, you can also increase the number of transfers to 200.
+
+    rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
+
+You will need to experiment with these values to find the optimal settings for your setup.
+
 ### Versions

 When bucket versioning is enabled (this can be done with rclone with

@@ -2262,6 +2294,21 @@ You can also do this entirely on the command line

 This is the provider used as main example and described in the [configuration](#configuration) section above.

+### AWS Directory Buckets
+
+From rclone v1.69 [Directory Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+are supported.
+
+You will need to set the `directory_bucket = true` config parameter
+or use `--s3-directory-bucket`.
+
+Note that rclone cannot yet:
+
+- Create directory buckets
+- List directory buckets
+
+See [the --s3-directory-bucket flag](#s3-directory-bucket) for more info
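A hedged example of what such a remote could look like in the config file (the remote name and region are placeholders; credentials and any endpoint settings your bucket's Availability Zone requires are omitted):

    [s3dir]
    type = s3
    provider = AWS
    region = us-east-1
    directory_bucket = true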

 ### AWS Snowball Edge

 [AWS Snowball](https://aws.amazon.com/snowball/) is a hardware
@@ -224,6 +224,17 @@ Properties:
 - Type: string
 - Required: false

+#### --zoho-upload-cutoff
+
+Cutoff for switching to large file upload api (>= 10 MiB).
+
+Properties:
+
+- Config:  upload_cutoff
+- Env Var: RCLONE_ZOHO_UPLOAD_CUTOFF
+- Type:    SizeSuffix
+- Default: 10Mi
+
 #### --zoho-encoding

 The encoding for the backend.
@@ -1 +1 @@
-v1.68.0
+v1.69.0
@@ -2,7 +2,7 @@
 package configstruct

 import (
-	"encoding/json"
+	"encoding/csv"
 	"errors"
 	"fmt"
 	"reflect"

@@ -31,7 +31,7 @@ func camelToSnake(in string) string {
 //
 // Builtin types are expected to be encoding as their natural
 // stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded as JSON with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
 //
 // Any other types are expected to be encoded by their String()
 // methods and decoded by their `Set(s string) error` methods.

@@ -58,14 +58,18 @@ func StringToInterface(def interface{}, in string) (newValue interface{}, err er
 	case time.Duration:
 		newValue, err = time.ParseDuration(in)
 	case []string:
-		// JSON decode arrays of strings
-		if in != "" {
-			var out []string
-			err = json.Unmarshal([]byte(in), &out)
-			newValue = out
-		} else {
-			// Empty string we will treat as empty array
+		// CSV decode arrays of strings - ideally we would use
+		// fs.CommaSepList here but we can't as it would cause
+		// a circular import.
+		if len(in) == 0 {
 			newValue = []string{}
+		} else {
+			r := csv.NewReader(strings.NewReader(in))
+			newValue, err = r.Read()
+			switch _err := err.(type) {
+			case *csv.ParseError:
+				err = _err.Err // remove line numbers from the error message
+			}
 		}
 	default:
 		// Try using a Set method
@@ -204,9 +204,11 @@ func TestStringToInterface(t *testing.T) {
 		{"1m1s", fs.Duration(0), fs.Duration(61 * time.Second), ""},
 		{"1potato", fs.Duration(0), nil, `parsing "1potato" as fs.Duration failed: parsing time "1potato" as "2006-01-02": cannot parse "1potato" as "2006"`},
 		{``, []string{}, []string{}, ""},
-		{`[]`, []string(nil), []string{}, ""},
-		{`["hello"]`, []string{}, []string{"hello"}, ""},
-		{`["hello","world!"]`, []string(nil), []string{"hello", "world!"}, ""},
+		{`""`, []string(nil), []string{""}, ""},
+		{`hello`, []string{}, []string{"hello"}, ""},
+		{`"hello"`, []string{}, []string{"hello"}, ""},
+		{`hello,world!`, []string(nil), []string{"hello", "world!"}, ""},
+		{`"hello","world!"`, []string(nil), []string{"hello", "world!"}, ""},
 		{"1s", time.Duration(0), time.Second, ""},
 		{"1m1s", time.Duration(0), 61 * time.Second, ""},
 		{"1potato", time.Duration(0), nil, `parsing "1potato" as time.Duration failed: time: unknown unit "potato" in duration "1potato"`},
@@ -143,12 +143,32 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) {
 	// Read default from environment if possible
 	envKey := fs.OptionToEnv(name)
 	if envValue, envFound := os.LookupEnv(envKey); envFound {
-		err := flags.Set(name, envValue)
-		if err != nil {
-			fs.Fatalf(nil, "Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
+		isStringArray := false
+		opt, isOption := flag.Value.(*fs.Option)
+		if isOption {
+			_, isStringArray = opt.Default.([]string)
+		}
+		if isStringArray {
+			// Treat stringArray differently, treating the environment variable as a CSV array
+			var list fs.CommaSepList
+			err := list.Set(envValue)
+			if err != nil {
+				fs.Fatalf(nil, "Invalid value when setting stringArray --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
+			}
+			// Set both the Value (so items on the command line get added) and DefValue so the help is correct
+			opt.Value = ([]string)(list)
+			flag.DefValue = list.String()
+			for _, v := range list {
+				fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, v, envKey, envValue)
+			}
+		} else {
+			err := flags.Set(name, envValue)
+			if err != nil {
+				fs.Fatalf(nil, "Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err)
+			}
+			fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue)
+			flag.DefValue = envValue
 		}
-		fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue)
-		flag.DefValue = envValue
 	}

 	// Add flag to Group if it is a global flag
@@ -85,7 +85,7 @@ var OptionsInfo = fs.Options{{
 	Groups: "RC",
 }, {
 	Name:    "metrics_addr",
-	Default: []string{""},
+	Default: []string{},
 	Help:    "IPaddress:Port or :Port to bind metrics server to",
 	Groups:  "Metrics",
 }}.

@@ -37,7 +37,7 @@ func init() {
 // If the server wasn't configured the *Server returned may be nil
 func MetricsStart(ctx context.Context, opt *rc.Options) (*MetricsServer, error) {
 	jobs.SetOpt(opt) // set the defaults for jobs
-	if opt.MetricsHTTP.ListenAddr[0] != "" {
+	if len(opt.MetricsHTTP.ListenAddr) > 0 {
 		// Serve on the DefaultServeMux so can have global registrations appear
 		s, err := newMetricsServer(ctx, opt)
 		if err != nil {
@@ -264,14 +264,9 @@ func (o *Option) String() string {
 		if len(stringArray) == 0 {
 			return ""
 		}
-		// Encode string arrays as JSON
+		// Encode string arrays as CSV
 		// The default Go encoding can't be decoded uniquely
-		buf, err := json.Marshal(stringArray)
-		if err != nil {
-			Errorf(nil, "Can't encode default value for %q key - ignoring: %v", o.Name, err)
-			return "[]"
-		}
-		return string(buf)
+		return CommaSepList(stringArray).String()
 	}
 	return fmt.Sprint(v)
 }

@@ -531,7 +526,22 @@ func (oi *OptionsInfo) load() error {
 // their values read from the options, environment variables and
 // command line parameters.
 func GlobalOptionsInit() error {
-	for _, opt := range OptionsRegistry {
+	var keys []string
+	for key := range OptionsRegistry {
+		keys = append(keys, key)
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		// Sort alphabetically, but with "main" first
+		if keys[i] == "main" {
+			return true
+		}
+		if keys[j] == "main" {
+			return false
+		}
+		return keys[i] < keys[j]
+	})
+	for _, key := range keys {
+		opt := OptionsRegistry[key]
 		err := opt.load()
 		if err != nil {
 			return err
@@ -1,4 +1,4 @@
 package fs

 // VersionTag of rclone
-var VersionTag = "v1.68.0"
+var VersionTag = "v1.69.0"
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"os"
 	"path"
+	"slices"

 	"github.com/rclone/rclone/fs"
 	yaml "gopkg.in/yaml.v2"

@@ -35,6 +36,7 @@ type Backend struct {
 	CleanUp     bool     // when running clean, run cleanup first
 	Ignore      []string // test names to ignore the failure of
 	Tests       []string // paths of tests to run, blank for all
+	IgnoreTests []string // paths of tests not to run, blank for none
 	ListRetries int      // -list-retries if > 0
 	ExtraTime   float64  // factor to multiply the timeout by
 }

@@ -42,15 +44,15 @@ type Backend struct {
 // includeTest returns true if this backend should be included in this
 // test
 func (b *Backend) includeTest(t *Test) bool {
+	// Is this test ignored
+	if slices.Contains(b.IgnoreTests, t.Path) {
+		return false
+	}
+	// Empty b.Tests implies do all of them except the ignored
 	if len(b.Tests) == 0 {
 		return true
 	}
-	for _, testPath := range b.Tests {
-		if testPath == t.Path {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(b.Tests, t.Path)
 }

 // MakeRuns creates Run objects the Backend and Test
go.mod
@@ -79,7 +79,6 @@ require (
 	go.etcd.io/bbolt v1.3.10
 	goftp.io/server/v2 v2.0.1
 	golang.org/x/crypto v0.25.0
-	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
 	golang.org/x/net v0.27.0
 	golang.org/x/oauth2 v0.21.0
 	golang.org/x/sync v0.8.0

@@ -205,6 +204,7 @@ require (
 	go.opentelemetry.io/otel v1.24.0 // indirect
 	go.opentelemetry.io/otel/metric v1.24.0 // indirect
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
+	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/mod v0.19.0 // indirect
 	golang.org/x/tools v0.23.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
@@ -4,10 +4,10 @@ package exitcode
 const (
 	// Success is returned when rclone finished without error.
 	Success = iota
-	// UsageError is returned when there was a syntax or usage error in the arguments.
-	UsageError
 	// UncategorizedError is returned for any error not categorised otherwise.
 	UncategorizedError
+	// UsageError is returned when there was a syntax or usage error in the arguments.
+	UsageError
 	// DirNotFound is returned when a source or destination directory is not found.
 	DirNotFound
 	// FileNotFound is returned when a source or destination file is not found.