Merge branch 'master' into feat/extend-docker-volume-plugin-options
Commit 8b9325051e
336 changed files with 44960 additions and 33691 deletions
.github/workflows/build.yml (vendored, 32 changes)
@@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,23 +76,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.22.0-rc.1'
+            go: '>=1.23.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.20
-            os: ubuntu-latest
-            go: '1.20'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.21
             os: ubuntu-latest
             go: '1.21'
             quicktest: true
             racequicktest: true

+          - job_name: go1.22
+            os: ubuntu-latest
+            go: '1.22'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -124,7 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -237,7 +237,7 @@ jobs:
         id: setup-go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'
           check-latest: true
           cache: false

@@ -311,7 +311,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.22.0-rc.1'
+          go-version: '>=1.23.0-rc.1'

       - name: Set global environment variables
         shell: bash

.golangci.yml

@@ -13,6 +13,7 @@ linters:
     - stylecheck
     - unused
     - misspell
+    - gocritic
   #- prealloc
   #- maligned
   disable-all: true

@@ -98,3 +99,11 @@ linters-settings:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
+  gocritic:
+    disabled-checks:
+      - appendAssign
+      - captLocal
+      - commentFormatting
+      - exitAfterDefer
+      - ifElseChain
+      - singleCaseSwitch

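The newly enabled gocritic linter is what drives many of the mechanical cleanups in this commit (the ifElseChain flattenings, the compound-assignment rewrites, and the //nolint comments that appear below). As a quick illustration of the inline suppression syntax used later in the B2 timestamp tests, here is a minimal, hypothetical example (not from the diff):

```go
package main

import "fmt"

type point struct{ x, y int }

// Equal reports whether two points are the same.
func (p point) Equal(q point) bool { return p == q }

func main() {
	p := point{1, 2}
	// gocritic's dupArg check would flag the same value used as both
	// receiver and argument; the trailing directive silences just this line.
	fmt.Println(p.Equal(p)) //nolint:gocritic // deliberate self-comparison
}
```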
CONTRIBUTING.md

@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
 project root:

     go install github.com/rclone/rclone/fstest/test_all
-    test_all -backend drive
+    test_all -backends drive

 ### Full integration testing

@@ -508,7 +508,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from genric config questions (eg `region` and `endpoint).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.

MAINTAINERS.md

@@ -21,6 +21,8 @@ Current active maintainers of rclone are:
 | Chun-Hung Tseng | @henrybear327      | Proton Drive Backend |
 | Hideo Aoyama    | @boukendesho       | snap packaging       |
 | nielash         | @nielash           | bisync               |
+| Dan McArdle     | @dmcardle          | gitannex             |
+| Sam Harrison    | @childish-sambino  | filescom             |

 **This is a work in progress Draft**

README.md

@@ -55,7 +55,9 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
   * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
   * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
+  * Files.com [:page_facing_up:](https://rclone.org/filescom/)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
+  * GoFile [:page_facing_up:](https://rclone.org/gofile/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
   * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)

@@ -93,6 +95,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
   * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
   * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
+  * Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
   * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)

backend/alias/alias_internal_test.go

@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
 	configfile.Install()

 	// Configure the remote
-	config.FileSet(remoteName, "type", "alias")
-	config.FileSet(remoteName, "remote", root)
+	config.FileSetValue(remoteName, "type", "alias")
+	config.FileSetValue(remoteName, "remote", root)
 }

 func TestNewFS(t *testing.T) {

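`config.FileSet` is replaced by `config.FileSetValue` throughout this commit's tests. A minimal sketch of seeding a remote with the renamed helper, assuming it keeps the same `(section, key, value)` shape the diff shows; the remote name and target path are placeholders:

```go
package main

import (
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	configfile.Install() // install the default config file backend

	// Point a hypothetical alias remote at /tmp, mirroring the test's prepare().
	config.FileSetValue("myalias", "type", "alias")
	config.FileSetValue("myalias", "remote", "/tmp")
}
```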
backend/all/all.go

@@ -17,7 +17,9 @@ import (
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
+	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/ftp"
+	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
 	_ "github.com/rclone/rclone/backend/hasher"

@@ -39,6 +41,7 @@ import (
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/pikpak"
+	_ "github.com/rclone/rclone/backend/pixeldrain"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"

backend/azureblob/azureblob.go

@@ -711,10 +711,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ClientOptions: policyClientOptions,
 	}

-	// Here we auth by setting one of cred, sharedKeyCred or f.svc
+	// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
 	var (
 		cred          azcore.TokenCredential
 		sharedKeyCred *service.SharedKeyCredential
+		anonymous     = false
 	)
 	switch {
 	case opt.EnvAuth:

@@ -874,6 +875,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
 		}
+	case opt.Account != "":
+		// Anonymous access
+		anonymous = true
 	default:
 		return nil, errors.New("no authentication method configured")
 	}

@@ -903,6 +907,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			if err != nil {
 				return nil, fmt.Errorf("create client failed: %w", err)
 			}
+		} else if anonymous {
+			// Anonymous public access
+			f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
+			if err != nil {
+				return nil, fmt.Errorf("create public client failed: %w", err)
+			}
 		}
 	}
 	if f.svc == nil {

@@ -2084,7 +2094,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		return 0, nil
 	}
 	md5sum := m.Sum(nil)
-	transactionalMD5 := md5sum[:]

 	// increment the blockID and save the blocks for finalize
 	var binaryBlockID [8]byte // block counter as LSB first 8 bytes

@@ -2107,7 +2116,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	}
 	options := blockblob.StageBlockOptions{
 		// Specify the transactional md5 for the body, to be validated by the service.
-		TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
+		TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
 	}
 	_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
 	if err != nil {

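The new `anonymous` path covers the case where only the account name is configured, creating a credential-free service client that works against containers with public read access. A standalone sketch of the same Azure SDK call outside rclone, assuming a public container; the account and container names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	// No credential object at all - this only works for public containers.
	svc, err := service.NewClientWithNoCredential("https://myaccount.blob.core.windows.net", nil)
	if err != nil {
		log.Fatal(err)
	}
	pager := svc.NewContainerClient("public-container").NewListBlobsFlatPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range page.Segment.BlobItems {
			fmt.Println(*item.Name)
		}
	}
}
```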
backend/azurefiles/azurefiles.go

@@ -1035,12 +1035,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
 			return fmt.Errorf("update: unable to create file: %w", createErr)
 		}
-	} else {
+	} else if size != o.Size() {
 		// Resize the file if needed
-		if size != o.Size() {
-			if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
-				return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
-			}
+		if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
+			return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
 		}
 	}

backend/b2/api/types_test.go

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
 }

 func TestTimestampEqual(t *testing.T) {
-	assert.False(t, emptyT.Equal(emptyT))
+	assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
 	assert.False(t, t0.Equal(emptyT))
 	assert.False(t, emptyT.Equal(t0))
 	assert.False(t, t0.Equal(t1))
 	assert.False(t, t1.Equal(t0))
-	assert.True(t, t0.Equal(t0))
-	assert.True(t, t1.Equal(t1))
+	assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
+	assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
 }

backend/b2/b2.go

@@ -299,13 +299,14 @@ type Fs struct {

 // Object describes a b2 object
 type Object struct {
-	fs       *Fs       // what this object is part of
-	remote   string    // The remote path
-	id       string    // b2 id of the file
-	modTime  time.Time // The modified time of the object if known
-	sha1     string    // SHA-1 hash if known
-	size     int64     // Size of the object
-	mimeType string    // Content-Type of the object
+	fs       *Fs               // what this object is part of
+	remote   string            // The remote path
+	id       string            // b2 id of the file
+	modTime  time.Time         // The modified time of the object if known
+	sha1     string            // SHA-1 hash if known
+	size     int64             // Size of the object
+	mimeType string            // Content-Type of the object
+	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }

 // ------------------------------------------------------------

@@ -1593,7 +1594,14 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
-	return o.parseTimeString(Info[timeKey])
+	err = o.parseTimeString(Info[timeKey])
+	if err != nil {
+		return err
+	}
+	// For now, just set "mtime" in metadata
+	o.meta = make(map[string]string, 1)
+	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
+	return nil
 }

 // decodeMetaData sets the metadata in the object from an api.File

@@ -1695,6 +1703,16 @@ func timeString(modTime time.Time) string {
 	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
 }

+// parseTimeStringHelper converts a decimal string number of milliseconds
+// elapsed since January 1, 1970 UTC into a time.Time
+func parseTimeStringHelper(timeString string) (time.Time, error) {
+	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
+}
+
 // parseTimeString converts a decimal string number of milliseconds
 // elapsed since January 1, 1970 UTC into a time.Time and stores it in
 // the modTime variable.

@@ -1702,12 +1720,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 	if timeString == "" {
 		return nil
 	}
-	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	modTime, err := parseTimeStringHelper(timeString)
 	if err != nil {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return nil
 	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
+	o.modTime = modTime
 	return nil
 }

@@ -1861,6 +1879,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		ContentType: resp.Header.Get("Content-Type"),
 		Info:        Info,
 	}
+
+	// Embryonic metadata support - just mtime
+	o.meta = make(map[string]string, 1)
+	modTime, err := parseTimeStringHelper(info.Info[timeKey])
+	if err == nil {
+		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
+	}
+
 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old

@@ -1958,7 +1984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	if err == nil {
 		fs.Debugf(o, "File is big enough for chunked streaming")
-		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
 		if err != nil {
 			o.fs.putRW(rw)
 			return err

@@ -1990,7 +2016,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.decodeMetaDataFileInfo(up.info)
 	}

-	modTime := src.ModTime(ctx)
+	modTime, err := o.getModTime(ctx, src, options)
+	if err != nil {
+		return err
+	}

 	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
 	if calculatedSha1 == "" {

@@ -2095,6 +2124,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return o.decodeMetaDataFileInfo(&response)
 }

+// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
+// When metadata support is added to b2, this method will need a more generic name
+func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
+	modTime := src.ModTime(ctx)
+
+	// Fetch metadata if --metadata is in use
+	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
+	}
+	// merge metadata into request and user metadata
+	for k, v := range meta {
+		k = strings.ToLower(k)
+		// For now, the only metadata we're concerned with is "mtime"
+		switch k {
+		case "mtime":
+			// mtime in meta overrides source ModTime
+			metaModTime, err := time.Parse(time.RFC3339Nano, v)
+			if err != nil {
+				fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
+			} else {
+				modTime = metaModTime
+			}
+		default:
+			// Do nothing for now
+		}
+	}
+	return modTime, nil
+}
+
 // OpenChunkWriter returns the chunk size and a ChunkWriter
 //
 // Pass in the remote and the src object

@@ -2126,7 +2185,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		Concurrency: o.fs.opt.UploadConcurrency,
 		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
 	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
 	return info, up, err
 }

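`parseTimeStringHelper` and the pre-existing `timeString` are exact inverses at millisecond precision. A self-contained sketch of the same conversions:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseMillis mirrors parseTimeStringHelper: decimal milliseconds since
// the Unix epoch converted into a UTC time.Time.
func parseMillis(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

// toMillis mirrors timeString: the inverse conversion.
func toMillis(t time.Time) string {
	return strconv.FormatInt(t.UnixNano()/1e6, 10)
}

func main() {
	t0, _ := parseMillis("1370056473000")
	fmt.Println(t0)           // 2013-06-01 03:14:33 +0000 UTC
	fmt.Println(toMillis(t0)) // 1370056473000
}
```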
backend/b2/b2_internal_test.go

@@ -184,57 +184,126 @@ func TestParseTimeString(t *testing.T) {

 }

-// This is adapted from the s3 equivalent.
-func (f *Fs) InternalTestMetadata(t *testing.T) {
-	ctx := context.Background()
-	original := random.String(1000)
-	contents := fstest.Gz(t, original)
-	mimeType := "text/html"
-
-	item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-	btime := time.Now()
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
-	defer func() {
-		assert.NoError(t, obj.Remove(ctx))
-	}()
-	o := obj.(*Object)
-	gotMetadata, err := o.getMetaData(ctx)
-	require.NoError(t, err)
-
-	// We currently have a limited amount of metadata to test with B2
-	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
-
-	// Modification time from the x-bz-info-src_last_modified_millis header
-	var mtime api.Timestamp
-	err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
-	if err != nil {
-		fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
-	}
-	assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
-
-	// Upload time
-	gotBtime := time.Time(gotMetadata.UploadTimestamp)
-	dt := gotBtime.Sub(btime)
-	assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
-
-	t.Run("GzipEncoding", func(t *testing.T) {
-		// Test that the gzipped file we uploaded can be
-		// downloaded
-		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
-			gotContents := fstests.ReadObject(ctx, t, o, -1)
-			assert.Equal(t, wantContents, gotContents)
-			assert.Equal(t, wantSize, o.Size())
-			gotHash, err := o.Hash(ctx, hash.SHA1)
-			require.NoError(t, err)
-			assert.Equal(t, wantHash, gotHash)
-		}
-
-		t.Run("NoDecompress", func(t *testing.T) {
-			checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
-		})
-	})
-}
+// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
+func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
+	var headers = make(map[string]string)
+	for _, option := range options {
+		k, v := option.Header()
+		k = strings.ToLower(k)
+		if strings.HasPrefix(k, headerPrefix) {
+			headers[k[len(headerPrefix):]] = v
+		}
+	}
+
+	return headers
+}
+
+func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
+	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
+	t.Run(what, func(t *testing.T) {
+		ctx := context.Background()
+
+		ss := fs.SizeSuffix(0)
+		err := ss.Set(size)
+		require.NoError(t, err)
+		original := random.String(int(ss))
+
+		contents := fstest.Gz(t, original)
+		mimeType := "text/html"
+
+		if chunkSize != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(chunkSize)
+			require.NoError(t, err)
+			_, err = f.SetUploadChunkSize(ss)
+			require.NoError(t, err)
+		}
+
+		if uploadCutoff != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(uploadCutoff)
+			require.NoError(t, err)
+			_, err = f.SetUploadCutoff(ss)
+			require.NoError(t, err)
+		}
+
+		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+		btime := time.Now()
+		metadata := fs.Metadata{
+			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any
+			// more precision than that
+			"mtime": "2009-05-06T04:05:06.499Z",
+		}
+
+		// Need to specify HTTP options with the header prefix since they are passed as-is
+		options := []fs.OpenOption{
+			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
+			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
+		}
+
+		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
+		defer func() {
+			assert.NoError(t, obj.Remove(ctx))
+		}()
+		o := obj.(*Object)
+		gotMetadata, err := o.getMetaData(ctx)
+		require.NoError(t, err)
+
+		// X-Bz-Info-a & X-Bz-Info-b
+		optMetadata := OpenOptionToMetaData(options)
+		for k, v := range optMetadata {
+			got := gotMetadata.Info[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		// mtime
+		for k, v := range metadata {
+			got := o.meta[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
+
+		// Modification time from the x-bz-info-src_last_modified_millis header
+		var mtime api.Timestamp
+		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
+		if err != nil {
+			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
+		}
+		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
+
+		// Upload time
+		gotBtime := time.Time(gotMetadata.UploadTimestamp)
+		dt := gotBtime.Sub(btime)
+		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
+
+		t.Run("GzipEncoding", func(t *testing.T) {
+			// Test that the gzipped file we uploaded can be
+			// downloaded
+			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
+				gotContents := fstests.ReadObject(ctx, t, o, -1)
+				assert.Equal(t, wantContents, gotContents)
+				assert.Equal(t, wantSize, o.Size())
+				gotHash, err := o.Hash(ctx, hash.SHA1)
+				require.NoError(t, err)
+				assert.Equal(t, wantHash, gotHash)
+			}
+
+			t.Run("NoDecompress", func(t *testing.T) {
+				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
+			})
+		})
+	})
+}
+
+func (f *Fs) InternalTestMetadata(t *testing.T) {
+	// 1 kB regular file
+	f.internalTestMetadata(t, "1kiB", "", "")
+
+	// 10 MiB large file
+	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
+}

 func sha1Sum(t *testing.T, s string) string {
 	hash := sha1.Sum([]byte(s))
 	return fmt.Sprintf("%x", hash)

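The `OpenOptionToMetaData` helper above strips B2's info-header prefix to recover metadata keys. A dependency-free sketch of the same mapping; the header values are placeholders and the prefix mirrors the `headerPrefix` constant referenced in the diff:

```go
package main

import (
	"fmt"
	"strings"
)

// headersToMeta collects headers carrying the B2 info prefix and drops
// the prefix from the key, as the test helper does for fs.OpenOptions.
func headersToMeta(headers map[string]string) map[string]string {
	const prefix = "x-bz-info-" // the prefix b2 strips (headerPrefix in the diff)
	meta := make(map[string]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		if strings.HasPrefix(k, prefix) {
			meta[k[len(prefix):]] = v
		}
	}
	return meta
}

func main() {
	fmt.Println(headersToMeta(map[string]string{
		"X-Bz-Info-a":  "1",
		"Content-Type": "text/html", // no prefix, so ignored
	})) // map[a:1]
}
```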
backend/b2/upload.go

@@ -91,7 +91,7 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
 	size := src.Size()
 	parts := 0
 	chunkSize := defaultChunkSize

@@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 			parts++
 		}
 	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_start_large_file",
-	}
 	bucket, bucketPath := o.split()
 	bucketID, err := f.getBucketID(ctx, bucket)
 	if err != nil {

@@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		BucketID: bucketID,
 		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
+	optionsToSend := make([]fs.OpenOption, 0, len(options))
 	if newInfo == nil {
-		modTime := src.ModTime(ctx)
+		modTime, err := o.getModTime(ctx, src, options)
+		if err != nil {
+			return nil, err
+		}
+
 		request.ContentType = fs.MimeType(ctx, src)
 		request.Info = map[string]string{
 			timeKey: timeString(modTime),
 		}
+		// Custom upload headers - remove header prefix since they are sent in the body
+		for _, option := range options {
+			k, v := option.Header()
+			k = strings.ToLower(k)
+			if strings.HasPrefix(k, headerPrefix) {
+				request.Info[k[len(headerPrefix):]] = v
+			} else {
+				optionsToSend = append(optionsToSend, option)
+			}
+		}
 		// Set the SHA1 if known
 		if !o.fs.opt.DisableCheckSum || doCopy {
 			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {

@@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		request.ContentType = newInfo.ContentType
 		request.Info = newInfo.Info
 	}
+	opts := rest.Opts{
+		Method:  "POST",
+		Path:    "/b2_start_large_file",
+		Options: optionsToSend,
+	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)

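Both `Update` and `newLargeUpload` now route through `getModTime`, where an `mtime` entry in the `--metadata` map takes precedence over the source's ModTime when it parses as RFC3339Nano. A minimal sketch of that precedence rule:

```go
package main

import (
	"fmt"
	"time"
)

// pickModTime mirrors the precedence in getModTime above: an "mtime"
// entry in the metadata map, if it parses, wins over the source time.
func pickModTime(srcModTime time.Time, meta map[string]string) time.Time {
	if v, ok := meta["mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
			return t
		}
	}
	return srcModTime
}

func main() {
	src := time.Date(2001, 5, 6, 4, 5, 6, 0, time.UTC)
	meta := map[string]string{"mtime": "2009-05-06T04:05:06.499Z"}
	fmt.Println(pickModTime(src, meta)) // 2009-05-06 04:05:06.499 +0000 UTC
}
```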
backend/cache/cache.go (vendored, 22 changes)

@@ -409,18 +409,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 			if err != nil {
 				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
-		} else {
-			if opt.PlexPassword != "" && opt.PlexUsername != "" {
-				decPass, err := obscure.Reveal(opt.PlexPassword)
-				if err != nil {
-					decPass = opt.PlexPassword
-				}
-				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-					m.Set("plex_token", token)
-				})
-				if err != nil {
-					return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
-				}
+		} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
+			decPass, err := obscure.Reveal(opt.PlexPassword)
+			if err != nil {
+				decPass = opt.PlexPassword
+			}
+			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+				m.Set("plex_token", token)
+			})
+			if err != nil {
+				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
 			}
 		}

backend/cache/cache_internal_test.go (vendored, 32 changes)

@@ -33,7 +33,7 @@ import (
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/require"
 )

@@ -123,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-	vfsflags.Opt.DirCacheTime = time.Second * 30
+	vfscommon.Opt.DirCacheTime = time.Second * 30
 	testSize := int64(524288000)

-	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
+	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -338,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -368,7 +368,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -417,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	if runInstance.rootIsCrypt {
 		data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
 		require.NoError(t, err)
-		expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
+		expectedSize++ // FIXME newline gets in, likely test data issue
 	} else {
 		data2 = []byte("test content")
 	}

@@ -708,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)

@@ -743,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-	vfsflags.Opt.DirCacheTime = time.Second * 10
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

@@ -850,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
-	for _, s := range config.FileSections() {
-		if s == remote {
+	for _, s := range config.GetRemotes() {
+		if s.Name == remote {
 			remoteExists = true
 		}
 	}

@@ -875,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	cacheRemote := remote
 	if !remoteExists {
 		localRemote := remote + "-local"
-		config.FileSet(localRemote, "type", "local")
-		config.FileSet(localRemote, "nounc", "true")
+		config.FileSetValue(localRemote, "type", "local")
+		config.FileSetValue(localRemote, "nounc", "true")
 		m.Set("type", "cache")
 		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
 	} else {
-		remoteType := config.FileGet(remote, "type")
+		remoteType := config.GetValue(remote, "type")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil

@@ -891,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		m.Set("password", cryptPassword1)
 		m.Set("password2", cryptPassword2)
 	}
-	remoteRemote := config.FileGet(remote, "remote")
+	remoteRemote := config.GetValue(remote, "remote")
 	if remoteRemote == "" {
 		t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 		return nil, nil
 	}
 	remoteRemoteParts := strings.Split(remoteRemote, ":")
 	remoteWrapping := remoteRemoteParts[0]
-	remoteType := config.FileGet(remoteWrapping, "type")
+	remoteType := config.GetValue(remoteWrapping, "type")
 	if remoteType != "cache" {
 		t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 		return nil, nil

@@ -1192,7 +1192,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 func (r *run) cleanSize(t *testing.T, size int64) int64 {
 	if r.rootIsCrypt {
 		denominator := int64(65536 + 16)
-		size = size - 32
+		size -= 32
 		quotient := size / denominator
 		remainder := size % denominator
 		return (quotient*65536 + remainder - 16)

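`vfscommon.Opt.DirCacheTime` is an `fs.Duration` rather than a bare `time.Duration`, hence the explicit conversions above. A sketch of why the conversion is needed, using a stand-in named type (the real `fs.Duration` also adds flag and JSON parsing):

```go
package main

import (
	"fmt"
	"time"
)

// Duration stands in for rclone's fs.Duration, a named type over
// time.Duration (assumed shape for illustration).
type Duration time.Duration

type options struct {
	DirCacheTime Duration
}

func main() {
	var opt options
	// opt.DirCacheTime = time.Second * 4        // would not compile: mismatched types
	opt.DirCacheTime = Duration(time.Second * 4) // explicit conversion, as in the tests
	fmt.Println(time.Duration(opt.DirCacheTime)) // 4s
}
```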
backend/cache/handle.go (vendored, 10 changes)

@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)

 	// we align the start offset of the first chunk to a likely chunk in the storage
-	chunkStart = chunkStart - offset
+	chunkStart -= offset
 	r.queueOffset(chunkStart)
 	found := false

@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {

 	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
 	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
-		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
+		chunkStart -= int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)

@@ -415,10 +415,8 @@ func (w *worker) run() {
 				continue
 			}
 		}
-	} else {
-		if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-			continue
-		}
+	} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+		continue
 	}

 	chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)

backend/chunker/chunker.go

@@ -987,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
 		}
 	}

-	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
+	if o.main == nil && len(o.chunks) == 0 {
 		// Scanning hasn't found data chunks with conforming names.
 		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.

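The dropped `o.chunks == nil` test was redundant because `len` of a nil slice is defined to be 0, which is exactly what gocritic flags here. A two-line demonstration:

```go
package main

import "fmt"

func main() {
	var chunks []string // nil slice
	// len of a nil slice is 0, so the extra nil check the diff removes
	// was redundant: len(chunks) == 0 covers both cases.
	fmt.Println(chunks == nil, len(chunks) == 0) // true true

	chunks = []string{}
	fmt.Println(chunks == nil, len(chunks) == 0) // false true
}
```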
backend/compress/compress.go

@@ -38,6 +38,7 @@ import (
 const (
 	initialChunkSize = 262144  // Initial and max sizes of chunks when reading parts of the file. Currently
 	maxChunkSize     = 8388608 // at 256 KiB and 8 MiB.
+	chunkStreams     = 0       // Streams to use for reading

 	bufferSize     = 8388608
 	heuristicBytes = 1048576

@@ -1362,7 +1363,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		}
 	}
 	// Get a chunkedreader for the wrapped object
-	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
+	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
 	// Get file handle
 	var file io.Reader
 	if offset != 0 {

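The new `chunkStreams` constant is threaded into `chunkedreader.New`, whose two size arguments describe reads that start at 256 KiB and grow to an 8 MiB cap, per the comment above. A sketch of that doubling-with-cap schedule (illustrative only, not the chunkedreader implementation):

```go
package main

import "fmt"

func main() {
	const (
		initialChunkSize = 262144  // 256 KiB, as in the constants above
		maxChunkSize     = 8388608 // 8 MiB cap
	)
	// Illustrative growth schedule: double each read until the cap,
	// the usual pattern behind an initial/max chunk size pair.
	for size := initialChunkSize; size <= maxChunkSize; size *= 2 {
		fmt.Println(size) // 262144, 524288, ..., 8388608
	}
}
```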
backend/crypt/cipher.go

@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	for _, runeValue := range plaintext {
 		dir += int(runeValue)
 	}
-	dir = dir % 256
+	dir %= 256

 	// We'll use this number to store in the result filename...
 	var result bytes.Buffer

@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 			if pos >= 26 {
 				pos -= 6
 			}
-			pos = pos - thisdir
+			pos -= thisdir
 			if pos < 0 {
 				pos += 52
 			}

@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
 		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
 		// Zero out the bad block and continue
 		for i := range (*fh.buf)[:n] {
-			(*fh.buf)[i] = 0
+			fh.buf[i] = 0
 		}
 	}
 	fh.bufIndex = 0

backend/drive/drive.go

@@ -2219,7 +2219,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			case in <- job:
 			default:
 				overflow = append(overflow, job)
-				wg.Add(-1)
+				wg.Done()
 			}
 		}

@@ -3965,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != hash.MD5 {
+	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
 		return "", hash.ErrUnsupported
 	}
 	return "", nil

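`wg.Done()` is literally defined in the sync package as `wg.Add(-1)`, so the change is purely about clarity. A runnable demonstration that both decrements satisfy the same `Wait`:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(2)

	go wg.Done()  // idiomatic: signals one unit of work finished
	go wg.Add(-1) // legal and equivalent (Done is defined as Add(-1)), but less clear

	wg.Wait()
	fmt.Println("both decrements observed")
}
```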
backend/drive/drive_internal_test.go

@@ -566,7 +566,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)

-	opt := &filter.Opt{}
+	opt := &filter.Options{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)

backend/filescom/filescom.go (new file, 912 lines)

@@ -0,0 +1,912 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	files_sdk "github.com/Files-com/files-sdk-go/v3"
	"github.com/Files-com/files-sdk-go/v3/bundle"
	"github.com/Files-com/files-sdk-go/v3/file"
	file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
	"github.com/Files-com/files-sdk-go/v3/folder"
	"github.com/Files-com/files-sdk-go/v3/session"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
)

/*
Run of rclone info
stringNeedsEscaping = []rune{
	'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential

	folderNotEmpty = "processing-failure/folder-not-empty"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "filescom",
		Description: "Files.com",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name: "site",
				Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
			}, {
				Name: "username",
				Help: "The username used to authenticate with Files.com.",
			}, {
				Name:       "password",
				Help:       "The password used to authenticate with Files.com.",
				IsPassword: true,
			}, {
				Name:      "api_key",
				Help:      "The API key used to authenticate with Files.com.",
				Advanced:  true,
				Sensitive: true,
			}, {
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.Display |
					encoder.EncodeBackSlash |
					encoder.EncodeRightSpace |
					encoder.EncodeRightCrLfHtVt |
					encoder.EncodeInvalidUtf8),
			}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Site     string               `config:"site"`
	Username string               `config:"username"`
	Password string               `config:"password"`
	APIKey   string               `config:"api_key"`
	Enc      encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote files.com server
type Fs struct {
	name            string                 // name of this remote
	root            string                 // the path we are working on
	opt             Options                // parsed options
	features        *fs.Features           // optional features
	fileClient      *file.Client           // the connection to the file API
	folderClient    *folder.Client         // the connection to the folder API
	migrationClient *file_migration.Client // the connection to the file migration API
	bundleClient    *bundle.Client         // the connection to the bundle API
	pacer           *fs.Pacer              // pacer for API calls
}

// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	size     int64     // size of the object
	crc32    string    // CRC32 of the object content
	md5      string    // MD5 of the object content
	mimeType string    // Content-Type of the object
	modTime  time.Time // modification time of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("files root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}

	if apiErr, ok := err.(files_sdk.ResponseError); ok {
		for _, e := range retryErrorCodes {
			if apiErr.HttpCode == e {
				fs.Debugf(nil, "Retrying API error %v", err)
				return true, err
			}
		}
	}

	return fserrors.ShouldRetry(err), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
	params := files_sdk.FileFindParams{
		Path: f.absPath(path),
	}

	var file files_sdk.File
	err = f.pacer.Call(func() (bool, error) {
		file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	return &file, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	root = strings.Trim(root, "/")

	config, err := newClientConfig(ctx, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		fileClient:      &file.Client{Config: config},
		folderClient:    &folder.Client{Config: config},
		migrationClient: &file_migration.Client{Config: config},
		bundleClient:    &bundle.Client{Config: config},
		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		CanHaveEmptyDirectories:  true,
		ReadMimeType:             true,
		DirModTimeUpdatesOnWrite: true,
	}).Fill(ctx, f)

	if f.root != "" {
		info, err := f.readMetaDataForPath(ctx, "")
		if err == nil && !info.IsDir() {
			f.root = path.Dir(f.root)
			if f.root == "." {
				f.root = ""
			}
			return f, fs.ErrorIsFile
		}
	}

	return f, err
}

func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
	if opt.Site != "" {
		config.Subdomain = opt.Site

		_, err = url.Parse(config.Endpoint())
		if err != nil {
			config.Subdomain = ""
			config.EndpointOverride = opt.Site

			_, err = url.Parse(config.Endpoint())
			if err != nil {
				err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
				return
			}
		}
	}

	config = config.Init().SetCustomClient(fshttp.NewClient(ctx))

	if opt.APIKey != "" {
		config.APIKey = opt.APIKey
		return
	}

	if opt.Username == "" {
		err = errors.New("username not found")
		return
	}
	if opt.Password == "" {
		err = errors.New("password not found")
		return
	}
	opt.Password, err = obscure.Reveal(opt.Password)
	if err != nil {
		return
	}

	sessionClient := session.Client{Config: config}
	params := files_sdk.SessionCreateParams{
		Username: opt.Username,
		Password: opt.Password,
	}

	thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
	if err != nil {
		err = fmt.Errorf("couldn't create session: %w", err)
		return
	}

	config.SessionId = thisSession.Id
	return
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if file != nil {
		err = o.setMetaData(file)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	var it *folder.Iter
	params := files_sdk.FolderListForParams{
		Path: f.absPath(dir),
	}

	err = f.pacer.Call(func() (bool, error) {
		it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't list files: %w", err)
	}

	for it.Next() {
		item := ptr(it.File())
		remote := f.opt.Enc.ToStandardPath(item.DisplayName)
		remote = path.Join(dir, remote)
		if remote == dir {
			continue
		}

		item, err = f.readMetaDataForPath(ctx, remote)
		if err != nil {
			if files_sdk.IsNotExist(err) {
				continue
			}

			return nil, err
		}

		if item.IsDir() {
			d := fs.NewDir(remote, item.ModTime())
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, item)
			if err != nil {
				return nil, err
			}
			entries = append(entries, o)
		}
	}
	err = it.Err()
	if files_sdk.IsNotExist(err) {
		return nil, fs.ErrorDirNotFound
	}
	return
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
	// Create the directory for the object if it doesn't exist
	err = f.mkParentDir(ctx, remote)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

func (f *Fs) mkdir(ctx context.Context, path string) error {
	if path == "" || path == "." {
		return nil
	}

	params := files_sdk.FolderCreateParams{
		Path:         path,
		MkdirParents: ptr(true),
	}

	err := f.pacer.Call(func() (bool, error) {
		_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if files_sdk.IsExist(err) {
		return nil
	}
	return err
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
	return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.mkdir(ctx, f.absPath(dir))
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	o := Object{
		fs:     f,
		remote: dir,
	}
	return o.SetModTime(ctx, modTime)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	path := f.absPath(dir)
	if path == "" || path == "." {
		return errors.New("can't purge root directory")
	}

	params := files_sdk.FileDeleteParams{
		Path:      path,
		Recursive: ptr(!check),
	}

	err := f.pacer.Call(func() (bool, error) {
		err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
		// Allow for eventual consistency deletion of child objects.
		if isFolderNotEmpty(err) {
			return true, err
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if files_sdk.IsNotExist(err) {
			return fs.ErrorDirNotFound
		} else if isFolderNotEmpty(err) {
			return fs.ErrorDirectoryNotEmpty
		}

		return fmt.Errorf("rmdir failed: %w", err)
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err = srcObj.readMetaData(ctx)
	if err != nil {
		return
	}

	srcPath := srcObj.fs.absPath(srcObj.remote)
	dstPath := f.absPath(remote)
	if strings.EqualFold(srcPath, dstPath) {
		return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, err = f.createObject(ctx, remote)
	if err != nil {
		return
	}

	// Copy the object
	params := files_sdk.FileCopyParams{
		Path:        srcPath,
		Destination: dstPath,
		Overwrite:   ptr(true),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}

	err = f.waitForAction(ctx, action, "copy")
	if err != nil {
		return
	}

	err = dstObj.SetModTime(ctx, srcObj.modTime)
	return
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
	// Move the object
	params := files_sdk.FileMoveParams{
		Path:        src.absPath(srcRemote),
		Destination: f.absPath(dstRemote),
	}

	var action files_sdk.FileAction
	err = f.pacer.Call(func() (bool, error) {
		action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	err = f.waitForAction(ctx, action, "move")
	if err != nil {
		return nil, err
	}

	info, err = f.readMetaDataForPath(ctx, dstRemote)
	return
}

func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
	var migration files_sdk.FileMigration
	err = f.pacer.Call(func() (bool, error) {
		migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
			// noop
		}, files_sdk.WithContext(ctx))
		return shouldRetry(ctx, err)
	})
	if err == nil && migration.Status != "completed" {
		return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
	}
	return
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// Check if destination exists
	_, err = f.readMetaDataForPath(ctx, dstRemote)
	if err == nil {
		return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, err := f.createObject(ctx, dstRemote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Do the move
|
||||
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
|
||||
return
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
|
||||
params := files_sdk.BundleCreateParams{
|
||||
Paths: []string{f.absPath(remote)},
|
||||
}
|
||||
if expire < fs.DurationOff {
|
||||
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
|
||||
}
|
||||
|
||||
var bundle files_sdk.Bundle
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
url = bundle.Url
|
||||
return
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet(hash.CRC32, hash.MD5)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
switch t {
|
||||
case hash.CRC32:
|
||||
if o.crc32 == "" {
|
||||
return "", nil
|
||||
}
|
||||
return fmt.Sprintf("%08s", o.crc32), nil
|
||||
case hash.MD5:
|
||||
return o.md5, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(file *files_sdk.File) error {
|
||||
o.modTime = file.ModTime()
|
||||
|
||||
if !file.IsDir() {
|
||||
o.size = file.Size
|
||||
o.crc32 = file.Crc32
|
||||
o.md5 = file.Md5
|
||||
o.mimeType = file.MimeType
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
||||
if err != nil {
|
||||
if files_sdk.IsNotExist(err) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
if file.IsDir() {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
return o.setMetaData(file)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||
params := files_sdk.FileUpdateParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
ProvidedMtime: &modTime,
|
||||
}
|
||||
|
||||
var file files_sdk.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(&file)
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset, count int64
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
params := files_sdk.FileDownloadParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.fileClient.Download(
|
||||
params,
|
||||
files_sdk.WithContext(ctx),
|
||||
files_sdk.RequestHeadersOption(headers),
|
||||
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
||||
in = closer
|
||||
return err
|
||||
}),
|
||||
)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a pointer to t - useful for returning pointers to constants
|
||||
func ptr[T any](t T) *T {
|
||||
return &t
|
||||
}
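
// Illustrative usage (not part of the backend): ptr lets callers hand the
// files_sdk params pointers to literal values, as seen above, e.g.:
//
//     params := files_sdk.FileCopyParams{Overwrite: ptr(true)}
//     expires := ptr(time.Now().Add(24 * time.Hour)) // hypothetical expiry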

func isFolderNotEmpty(err error) bool {
    var re files_sdk.ResponseError
    ok := errors.As(err, &re)
    return ok && re.Type == folderNotEmpty
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    uploadOpts := []file.UploadOption{
        file.UploadWithContext(ctx),
        file.UploadWithReader(in),
        file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
        file.UploadWithProvidedMtime(src.ModTime(ctx)),
    }

    err := o.fs.pacer.Call(func() (bool, error) {
        err := o.fs.fileClient.Upload(uploadOpts...)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return err
    }

    return o.readMetaData(ctx)
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    params := files_sdk.FileDeleteParams{
        Path: o.fs.absPath(o.remote),
    }

    return o.fs.pacer.Call(func() (bool, error) {
        err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
        return shouldRetry(ctx, err)
    })
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.mimeType
}

// Check the interfaces are satisfied
var (
    _ fs.Fs           = (*Fs)(nil)
    _ fs.Purger       = (*Fs)(nil)
    _ fs.PutStreamer  = (*Fs)(nil)
    _ fs.Copier       = (*Fs)(nil)
    _ fs.Mover        = (*Fs)(nil)
    _ fs.DirMover     = (*Fs)(nil)
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.Object       = (*Object)(nil)
    _ fs.MimeTyper    = (*Object)(nil)
)
17 backend/filescom/filescom_test.go Normal file
@@ -0,0 +1,17 @@
// Test Files filesystem interface
package filescom_test

import (
    "testing"

    "github.com/rclone/rclone/backend/filescom"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFilesCom:",
        NilObject:  (*filescom.Object)(nil),
    })
}

@@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
        Default: false,
    }, {
        Name: "concurrency",
        Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
        Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

@@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
`, "|", "`"),
        Default:  0,
        Advanced: true,
    }, {

311 backend/gofile/api/types.go Normal file
@@ -0,0 +1,311 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
    "fmt"
    "time"
)

const (
    // 2017-05-03T07:26:10-07:00
    timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).Format(timeFormat)
    return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
    newT, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}
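
// Illustrative sketch (not part of the package): round-tripping a Time
// through JSON using the quoted RFC3339 timeFormat above. The struct and
// field name here are hypothetical.
//
//     var v struct {
//         Created Time `json:"created"`
//     }
//     _ = json.Unmarshal([]byte(`{"created":"2017-05-03T07:26:10-07:00"}`), &v)
//     out, _ := json.Marshal(&v) // re-encodes as a quoted RFC3339 string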

// Error is returned from gofile when things go wrong
type Error struct {
    Status string `json:"status"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
    out := fmt.Sprintf("Error %q", e.Status)
    return out
}

// IsError returns true if there is an error
func (e Error) IsError() bool {
    return e.Status != "ok"
}

// Err returns err if not nil, or e if IsError or nil
func (e Error) Err(err error) error {
    if err != nil {
        return err
    }
    if e.IsError() {
        return e
    }
    return nil
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Types of things in Item
const (
    ItemTypeFolder = "folder"
    ItemTypeFile   = "file"
)

// Item describes a folder or a file as returned by /contents
type Item struct {
    ID            string                 `json:"id"`
    ParentFolder  string                 `json:"parentFolder"`
    Type          string                 `json:"type"`
    Name          string                 `json:"name"`
    Size          int64                  `json:"size"`
    Code          string                 `json:"code"`
    CreateTime    int64                  `json:"createTime"`
    ModTime       int64                  `json:"modTime"`
    Link          string                 `json:"link"`
    MD5           string                 `json:"md5"`
    MimeType      string                 `json:"mimetype"`
    ChildrenCount int                    `json:"childrenCount"`
    DirectLinks   map[string]*DirectLink `json:"directLinks"`
    //Public             bool     `json:"public"`
    //ServerSelected     string   `json:"serverSelected"`
    //Thumbnail          string   `json:"thumbnail"`
    //DownloadCount      int      `json:"downloadCount"`
    //TotalDownloadCount int64    `json:"totalDownloadCount"`
    //TotalSize          int64    `json:"totalSize"`
    //ChildrenIDs        []string `json:"childrenIds"`
    Children map[string]*Item `json:"children"`
}

// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
    return t.Unix()
}

// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
    return time.Unix(t, 0)
}

// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
    ExpireTime       int64  `json:"expireTime"`
    SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
    DomainsAllowed   []any  `json:"domainsAllowed"`
    Auth             []any  `json:"auth"`
    IsReqLink        bool   `json:"isReqLink"`
    DirectLink       string `json:"directLink"`
}

// Contents is returned from the /contents call
type Contents struct {
    Error
    Data struct {
        Item
    } `json:"data"`
    Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
    TotalCount  int  `json:"totalCount"`
    TotalPages  int  `json:"totalPages"`
    Page        int  `json:"page"`
    PageSize    int  `json:"pageSize"`
    HasNextPage bool `json:"hasNextPage"`
}

// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
    Error
    Data struct {
        ID string `json:"id"`
    } `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
    FolderCount            int64 `json:"folderCount"`
    FileCount              int64 `json:"fileCount"`
    Storage                int64 `json:"storage"`
    TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
    TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
    TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
    Error
    Data struct {
        ID                             string `json:"id"`
        Email                          string `json:"email"`
        Tier                           string `json:"tier"`
        PremiumType                    string `json:"premiumType"`
        Token                          string `json:"token"`
        RootFolder                     string `json:"rootFolder"`
        SubscriptionProvider           string `json:"subscriptionProvider"`
        SubscriptionEndDate            int    `json:"subscriptionEndDate"`
        SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
        SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
        StatsCurrent                   Stats  `json:"statsCurrent"`
        // StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
    } `json:"data"`
}

// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
    ParentFolderID string `json:"parentFolderId"`
    FolderName     string `json:"folderName"`
    ModTime        int64  `json:"modTime,omitempty"`
}

// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
    Error
    Data Item `json:"data"`
}

// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
    ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// DeleteResponse is the input to DELETE /contents
type DeleteResponse struct {
    Error
    Data map[string]Error
}

// Server is an upload server
type Server struct {
    Name string `json:"name"`
    Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
    return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
    return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
    return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
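
// For example, a server {Name: "store1", Zone: "eu"} (hypothetical values)
// yields Root() == "https://store1.gofile.io/" and
// URL() == "https://store1.gofile.io/contents/uploadfile".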

// ServersResponse is the output from /servers
type ServersResponse struct {
    Error
    Data struct {
        Servers []Server `json:"servers"`
    } `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
    Error
    Data Item `json:"data"`
}

// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
    ExpireTime       int64 `json:"expireTime,omitempty"`
    SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
    DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
    Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
    Error
    Data struct {
        ExpireTime       int64  `json:"expireTime"`
        SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
        DomainsAllowed   []any  `json:"domainsAllowed"`
        Auth             []any  `json:"auth"`
        IsReqLink        bool   `json:"isReqLink"`
        ID               string `json:"id"`
        DirectLink       string `json:"directLink"`
    } `json:"data"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define :
// For Attribute "name" : The name of the content (file or folder)
// For Attribute "description" : The description displayed on the download page (folder only)
// For Attribute "tags" : A comma-separated list of tags (folder only)
// For Attribute "public" : either true or false (folder only)
// For Attribute "expiry" : A unix timestamp of the expiration date (folder only)
// For Attribute "password" : The password to set (folder only)
type UpdateItemRequest struct {
    Attribute string `json:"attribute"`
    Value     any    `json:"attributeValue"`
}
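
// Illustrative sketch: renaming an item via PUT /contents/{id}/update would
// send a body like the following (per the attribute list above; the file
// name is hypothetical):
//
//     req := UpdateItemRequest{Attribute: "name", Value: "newname.txt"}
//     // encodes as {"attribute":"name","attributeValue":"newname.txt"}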

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
    Error
    Data Item `json:"data"`
}

// MoveRequest is the input to /contents/move
type MoveRequest struct {
    FolderID   string `json:"folderId"`
    ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
    Error
    Data map[string]struct {
        Error
        Item `json:"data"`
    } `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
    FolderID   string `json:"folderId"`
    ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
    Error
    Data map[string]struct {
        Error
        Item `json:"data"`
    } `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
    Error
    Data struct {
        Server string `json:"server"`
        Test   string `json:"test"`
    } `json:"data"`
}
1633 backend/gofile/gofile.go Normal file
File diff suppressed because it is too large
17 backend/gofile/gofile_test.go Normal file
@@ -0,0 +1,17 @@
// Test Gofile filesystem interface
package gofile_test

import (
    "testing"

    "github.com/rclone/rclone/backend/gofile"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestGoFile:",
        NilObject:  (*gofile.Object)(nil),
    })
}

@@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
    var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
    var path = strings.Replace(resultURL, endpoint, "", 1)

    path = path + expires
    path += expires
    mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
    mac.Write([]byte(path))
    signature := hex.EncodeToString(mac.Sum(nil))

@@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
        //"utime" - read-only
        //"content-type" - read-only
    }
    obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
    obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
    defer func() {
        assert.NoError(t, obj.Remove(ctx))
    }()

82 backend/local/clone_darwin.go Normal file
@@ -0,0 +1,82 @@
//go:build darwin && cgo

// Package local provides a filesystem interface
package local

import (
    "context"
    "fmt"
    "runtime"

    "github.com/go-darwin/apfs"
    "github.com/rclone/rclone/fs"
)

// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
        return nil, fs.ErrorCantCopy
    }
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't clone - not same remote type")
        return nil, fs.ErrorCantCopy
    }

    // Fetch metadata if --metadata is in use
    meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
    if err != nil {
        return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
    }

    // Create destination
    dstObj := f.newObject(remote)
    err = dstObj.mkdirAll()
    if err != nil {
        return nil, err
    }

    err = Clone(srcObj.path, f.localPath(remote))
    if err != nil {
        return nil, err
    }
    fs.Debugf(remote, "server-side cloned!")

    // Set metadata if --metadata is in use
    if meta != nil {
        err = dstObj.writeMetadata(meta)
        if err != nil {
            return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
        }
    }

    return f.NewObject(ctx, remote)
}

// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
    state := apfs.CopyFileStateAlloc()
    defer func() {
        if err := apfs.CopyFileStateFree(state); err != nil {
            fs.Errorf(dst, "free state error: %v", err)
        }
    }()
    cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
    fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
    return err
}
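
// For comparison with the note above, the closely related raw syscall usage
// would be the following sketch (hypothetical; unlike apfs.CopyFile with
// COPYFILE_CLONE it fails outright when cloning is unsupported rather than
// falling back to a copy):
//
//     import "golang.org/x/sys/unix"
//
//     err := unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW)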

// Check the interfaces are satisfied
var (
    _ fs.Copier = &Fs{}
)

@@ -32,9 +32,11 @@ import (
)

// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink"    // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
const (
    devUnset   = 0xdeadbeefcafebabe // a device id meaning it is unset
    linkSuffix = ".rclonelink"      // The suffix added to a translated symbolic link
    useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)

// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]

@@ -78,41 +80,46 @@ supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
    },
    Options: []fs.Option{{
        Name:     "nounc",
        Help:     "Disable UNC (long path names) conversion on Windows.",
        Default:  false,
        Advanced: runtime.GOOS != "windows",
        Examples: []fs.OptionExample{{
            Value: "true",
            Help:  "Disables long file names.",
        }},
    }, {
        Name:     "copy_links",
        Help:     "Follow symlinks and copy the pointed to item.",
        Default:  false,
        NoPrefix: true,
        ShortOpt: "L",
        Advanced: true,
    }, {
        Name:     "links",
        Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
        Default:  false,
        NoPrefix: true,
        ShortOpt: "l",
        Advanced: true,
    }, {
        Name: "skip_links",
        Help: `Don't warn about skipped symlinks.
    Options: []fs.Option{
        {
            Name:     "nounc",
            Help:     "Disable UNC (long path names) conversion on Windows.",
            Default:  false,
            Advanced: runtime.GOOS != "windows",
            Examples: []fs.OptionExample{{
                Value: "true",
                Help:  "Disables long file names.",
            }},
        },
        {
            Name:     "copy_links",
            Help:     "Follow symlinks and copy the pointed to item.",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "L",
            Advanced: true,
        },
        {
            Name:     "links",
            Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "l",
            Advanced: true,
        },
        {
            Name: "skip_links",
            Help: `Don't warn about skipped symlinks.

This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
        Default:  false,
        NoPrefix: true,
        Advanced: true,
    }, {
        Name: "zero_size_links",
        Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
            Default:  false,
            NoPrefix: true,
            Advanced: true,
        },
        {
            Name: "zero_size_links",
            Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).

Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:

@@ -122,11 +129,12 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu

So rclone now always reads the link.
`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "unicode_normalization",
        Help: `Apply unicode NFC normalization to paths and filenames.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "unicode_normalization",
            Help: `Apply unicode NFC normalization to paths and filenames.

This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.

@@ -140,11 +148,12 @@ some OSes.

Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "no_check_updated",
        Help: `Don't check to see if the files change during upload.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "no_check_updated",
            Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -

@@ -175,68 +184,96 @@ directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
        Default:  false,
        Advanced: true,
    }, {
        Name:     "one_file_system",
        Help:     "Don't cross filesystem boundaries (unix/macOS only).",
        Default:  false,
        NoPrefix: true,
        ShortOpt: "x",
        Advanced: true,
    }, {
        Name: "case_sensitive",
        Help: `Force the filesystem to report itself as case sensitive.
            Default:  false,
            Advanced: true,
        },
        {
            Name:     "one_file_system",
            Help:     "Don't cross filesystem boundaries (unix/macOS only).",
            Default:  false,
            NoPrefix: true,
            ShortOpt: "x",
            Advanced: true,
        },
        {
            Name: "case_sensitive",
            Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "case_insensitive",
        Help: `Force the filesystem to report itself as case insensitive.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "case_insensitive",
            Help: `Force the filesystem to report itself as case insensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "no_preallocate",
        Help: `Disable preallocation of disk space for transferred files.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "no_clone",
            Help: `Disable reflink cloning for server-side copies.

Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.

Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.

Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.

Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
            Default:  false,
            Advanced: true,
        },
        {
            Name: "no_preallocate",
            Help: `Disable preallocation of disk space for transferred files.

Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "no_sparse",
        Help: `Disable sparse files for multi-thread downloads.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "no_sparse",
            Help: `Disable sparse files for multi-thread downloads.

On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "no_set_modtime",
        Help: `Disable setting modtime.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "no_set_modtime",
            Help: `Disable setting modtime.

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "time_type",
        Help: `Set what kind of time is returned.
            Default:  false,
            Advanced: true,
        },
        {
            Name: "time_type",
            Help: `Set what kind of time is returned.

Normally rclone does all operations on the mtime or Modification time.

@@ -255,27 +292,29 @@ will silently replace it with the modification time which all OSes support.
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
        Default:  mTime,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: mTime.String(),
            Help:  "The last modification time.",
        }, {
            Value: aTime.String(),
            Help:  "The last access time.",
        }, {
            Value: bTime.String(),
            Help:  "The creation time.",
        }, {
            Value: cTime.String(),
            Help:  "The last status change time.",
        }},
    }, {
        Name:     config.ConfigEncoding,
        Help:     config.ConfigEncodingHelp,
        Advanced: true,
        Default:  encoder.OS,
    }},
            Default:  mTime,
            Advanced: true,
            Examples: []fs.OptionExample{{
                Value: mTime.String(),
                Help:  "The last modification time.",
            }, {
                Value: aTime.String(),
                Help:  "The last access time.",
            }, {
                Value: bTime.String(),
                Help:  "The creation time.",
            }, {
                Value: cTime.String(),
                Help:  "The last status change time.",
            }},
        },
        {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            Default:  encoder.OS,
        },
    },
    }
    fs.Register(fsi)
}

@@ -296,6 +335,7 @@ type Options struct {
    NoSetModTime bool                 `config:"no_set_modtime"`
    TimeType     timeType             `config:"time_type"`
    Enc          encoder.MultiEncoder `config:"encoding"`
    NoClone      bool                 `config:"no_clone"`
}

// Fs represents a local filesystem rooted at root

@@ -384,6 +424,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if opt.FollowSymlinks {
        f.lstat = os.Stat
    }
    if opt.NoClone {
        // Disable server-side copy when --local-no-clone is set
        f.features.Copy = nil
    }

    // Check to see if this points to a file
    fi, err := f.lstat(f.root)

@@ -1568,32 +1612,47 @@ func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
    if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
        if !filepath.IsAbs(s) {
            s2, err := filepath.Abs(s)
            if err == nil {
                s = s2
            }
        } else {
            s = filepath.Clean(s)
        }
    }
    var vol string
    if runtime.GOOS == "windows" {
        s = filepath.ToSlash(s)
        vol := filepath.VolumeName(s)
        vol = filepath.VolumeName(s)
        if vol == `\\?` && len(s) >= 6 {
            // `\\?\C:`
            vol = s[:6]
        }
        s = vol + enc.FromStandardPath(s[len(vol):])
        s = filepath.FromSlash(s)
        if !noUNC {
            // Convert to UNC
            s = file.UNCPath(s)
        }
        return s
        s = s[len(vol):]
    }
    // Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
    // Take care of the case Standard: ././‛. (the first dot means current directory)
    if enc != encoder.Standard {
        s = filepath.ToSlash(s)
        parts := strings.Split(s, "/")
        encoded := make([]string, len(parts))
        changed := false
        for i, p := range parts {
            if (p == ".") || (p == "..") {
                encoded[i] = p
                continue
            }
            part := enc.FromStandardName(p)
            changed = changed || part != p
            encoded[i] = part
        }
        if changed {
            s = strings.Join(encoded, "/")
        }
        s = filepath.FromSlash(s)
    }
    if runtime.GOOS == "windows" {
        s = vol + s
    }
    s2, err := filepath.Abs(s)
    if err == nil {
        s = s2
    }
    if !noUNC {
        // Convert to UNC. It does nothing on non windows platforms.
        s = file.UNCPath(s)
    }
    s = enc.FromStandardPath(s)
    return s
}

@@ -923,9 +923,7 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
        entrywanted := (directory && files[i].Type == "dir") ||
            (!directory && files[i].Type != "dir")
        if entrywanted {
            filestamp := files[0]
            files[0] = files[i]
            files[i] = filestamp
            files[0], files[i] = files[i], files[0]
        }
    }
    return files, nil

@@ -1927,7 +1927,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
        return shareURL, nil
    }

    cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
    const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
    directURL := ""
    segments := strings.Split(shareURL, "/")
    switch f.driveType {

@@ -379,7 +379,7 @@ func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, p
    }

    expectedMeta.Set("permissions", marshalPerms(t, perms))
    obj := fstests.PutTestContentsMetadata(ctx, t, f, file, content, true, "plain/text", expectedMeta)
    obj := fstests.PutTestContentsMetadata(ctx, t, f, file, false, content, true, "plain/text", expectedMeta)
    do, ok := obj.(fs.Metadataer)
    require.True(t, ok)
    actualMeta, err := do.Metadata(ctx)

@@ -26,7 +26,10 @@ package quickxorhash
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.

import "hash"
import (
    "crypto/subtle"
    "hash"
)

const (
    // BlockSize is the preferred size for hashing

@@ -48,6 +51,11 @@ func New() hash.Hash {
    return &quickXorHash{}
}

// xor dst with src
func xorBytes(dst, src []byte) int {
    return subtle.XORBytes(dst, src, dst)
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//
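
// Illustrative note on the xorBytes rewrite above: passing dst as both an
// input and the output makes subtle.XORBytes compute dst[i] ^= src[i] over
// min(len(dst), len(src)) bytes and return that count, e.g. (sketch):
//
//     dst := []byte{0xF0, 0x0F}
//     n := xorBytes(dst, []byte{0xFF, 0xFF}) // n == 2, dst is now {0x0F, 0xF0}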

@@ -1,20 +0,0 @@
//go:build !go1.20

package quickxorhash

func xorBytes(dst, src []byte) int {
    n := len(dst)
    if len(src) < n {
        n = len(src)
    }
    if n == 0 {
        return 0
    }
    dst = dst[:n]
    //src = src[:n]
    src = src[:len(dst)] // remove bounds check in loop
    for i := range dst {
        dst[i] ^= src[i]
    }
    return n
}

@@ -1,9 +0,0 @@
//go:build go1.20

package quickxorhash

import "crypto/subtle"

func xorBytes(dst, src []byte) int {
    return subtle.XORBytes(dst, src, dst)
}

@@ -58,12 +58,10 @@ func populateSSECustomerKeys(opt *Options) error {
    sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
    if opt.SSECustomerKeySha256 == "" {
        opt.SSECustomerKeySha256 = sha256Checksum
    } else {
        if opt.SSECustomerKeySha256 != sha256Checksum {
            return fmt.Errorf("the computed SHA256 checksum "+
                "(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
                sha256Checksum, opt.SSECustomerKeySha256)
        }
    } else if opt.SSECustomerKeySha256 != sha256Checksum {
        return fmt.Errorf("the computed SHA256 checksum "+
            "(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
            sha256Checksum, opt.SSECustomerKeySha256)
    }
    if opt.SSECustomerAlgorithm == "" {
        opt.SSECustomerAlgorithm = sseDefaultAlgorithm

@@ -148,7 +148,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
    }
    md5sumBinary := m.Sum([]byte{})
    w.addMd5(&md5sumBinary, int64(chunkNumber))
    md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
    md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)

    // Object storage requires 1 <= PartNumber <= 10000
    ossPartNumber := chunkNumber + 1

@@ -279,7 +279,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
    if extend := end - int64(len(w.md5s)); extend > 0 {
        w.md5s = append(w.md5s, make([]byte, extend)...)
    }
    copy(w.md5s[start:end], (*md5binary)[:])
    copy(w.md5s[start:end], (*md5binary))
}

func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {

@@ -8,13 +8,16 @@ import (
    "errors"
    "fmt"
    "io"
    "math/rand"
    "net/http"
    "net/url"
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/pikpak/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

@@ -253,6 +256,37 @@ func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api
    return
}

// getGcid retrieves Gcid cached in API server
func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err error) {
    cid, err := calcCid(ctx, src)
    if err != nil {
        return
    }

    params := url.Values{}
    params.Set("cid", cid)
    params.Set("file_size", strconv.FormatInt(src.Size(), 10))
    opts := rest.Opts{
        Method:       "GET",
        Path:         "/drive/v1/resource/cid",
        Parameters:   params,
        ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
    }

    info := struct {
        Gcid string `json:"gcid,omitempty"`
    }{}
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return "", err
    }
    return info.Gcid, nil
}

// Read the gcid of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with

@@ -306,11 +340,14 @@ func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader,
    return
}

// calcGcid calculates Gcid from reader
//
// Gcid is a custom hash to index a file contents
func calcGcid(r io.Reader, size int64) (string, error) {
    calcBlockSize := func(j int64) int64 {
        var psize int64 = 0x40000
        for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
            psize = psize << 1
            psize <<= 1
        }
        return psize
    }
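
// Worked example for calcBlockSize above (illustrative): psize starts at
// 0x40000 (256 KiB) and doubles while size/psize > 0x200 (512) and
// psize < 0x200000 (2 MiB). For a 1 GiB file the ratio starts at 4096,
// so psize doubles three times and settles at the 0x200000 (2 MiB) cap.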

@@ -330,3 +367,64 @@ func calcGcid(r io.Reader, size int64) (string, error) {
    }
    return hex.EncodeToString(totalHash.Sum(nil)), nil
}

// calcCid calculates Cid from source
//
// Cid is a simplified version of Gcid
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
    srcObj := fs.UnWrapObjectInfo(src)
    if srcObj == nil {
        return "", fmt.Errorf("failed to unwrap object from src: %s", src)
    }

    size := src.Size()
    hash := sha1.New()
    var rc io.ReadCloser

    readHash := func(start, length int64) (err error) {
        end := start + length - 1
        if rc, err = srcObj.Open(ctx, &fs.RangeOption{Start: start, End: end}); err != nil {
            return fmt.Errorf("failed to open src with range (%d, %d): %w", start, end, err)
        }
        defer fs.CheckClose(rc, &err)
        _, err = io.Copy(hash, rc)
        return err
    }

    if size <= 0xF000 { // 61440 = 60KB
        err = readHash(0, size)
    } else { // 20KB from three different parts
        for _, start := range []int64{0, size / 3, size - 0x5000} {
            err = readHash(start, 0x5000)
            if err != nil {
                break
            }
        }
    }
    if err != nil {
        return "", fmt.Errorf("failed to hash: %w", err)
    }
    cid = strings.ToUpper(hex.EncodeToString(hash.Sum(nil)))
    return
}

// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation
//
//	return "xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g, (e) => {
//	    const t = (16 * Math.random()) | 0;
//	    return ("x" == e ? t : (3 & t) | 8).toString(16);
//	});
func genDeviceID() string {
    base := []byte("xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx")
    for i, char := range base {
        switch char {
        case 'x':
            base[i] = fmt.Sprintf("%x", rand.Intn(16))[0]
        case 'y':
            base[i] = fmt.Sprintf("%x", rand.Intn(16)&3|8)[0]
        }
    }
    return string(base)
}

@ -37,10 +37,11 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
|
@ -70,8 +71,8 @@ const (
|
|||
taskWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURL = "https://api-drive.mypikpak.com"
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = s3manager.DefaultUploadConcurrency
|
||||
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = manager.DefaultUploadConcurrency
|
||||
)
|
||||
|
||||
// Globals
|
||||
|
@ -274,6 +275,7 @@ type Fs struct {
|
|||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
rootFolderID string // the id of the root folder
|
||||
deviceID string // device id used for api requests
|
||||
client *http.Client // authorized client
|
||||
m configmap.Mapper
|
||||
tokenMu *sync.Mutex // when renewing tokens
|
||||
|
@ -489,6 +491,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
|||
CanHaveEmptyDirectories: true, // can have empty directories
|
||||
NoMultiThreading: true, // can't have multiple threads downloading
|
||||
}).Fill(ctx, f)
|
||||
f.deviceID = genDeviceID()
|
||||
|
||||
if err := f.newClientWithPacer(ctx); err != nil {
|
||||
return nil, err
|
||||
|
@ -1016,6 +1019,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||
o = &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
parent: dirID,
|
||||
size: size,
|
||||
modTime: modTime,
|
||||
linkMu: new(sync.Mutex),
|
||||
|
@ -1048,7 +1052,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
// Create temporary object - still missing id, mimeType, gcid, md5sum
|
||||
dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -1060,7 +1064,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
// Manually update info of moved object to save API calls
|
||||
dstObj.id = srcObj.id
|
||||
dstObj.mimeType = srcObj.mimeType
|
||||
dstObj.gcid = srcObj.gcid
|
||||
dstObj.md5sum = srcObj.md5sum
|
||||
dstObj.hasMetaData = true
|
||||
|
||||
if srcLeaf != dstLeaf {
|
||||
// Rename
|
||||
|
@ -1068,16 +1077,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
|
||||
}
|
||||
err = dstObj.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Update info
|
||||
err = dstObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: couldn't locate moved file: %w", err)
|
||||
}
|
||||
return dstObj, dstObj.setMetaData(info)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
@ -1117,7 +1117,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
// Create temporary object - still missing id, mimeType, gcid, md5sum
|
||||
dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -1131,6 +1131,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
|
||||
return nil, fmt.Errorf("couldn't copy file: %w", err)
|
||||
}
|
||||
// Update info of the copied object with new parent but source name
|
||||
if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
|
||||
} else if err = dstObj.setMetaData(info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Can't copy and change name in one step so we have to check if we have
|
||||
// the correct name after copy
|
||||
|
@ -1145,16 +1151,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
|
||||
}
|
||||
err = dstObj.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Update info
|
||||
err = dstObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
|
||||
}
|
||||
return dstObj, dstObj.setMetaData(info)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
@@ -1194,32 +1191,33 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
 func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
 	p := resumable.Params
-	endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"
-
-	cfg := &aws.Config{
-		Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
-		Region:      aws.String("pikpak"),
-		Endpoint:    &endpoint,
-	}
-	sess, err := session.NewSession(cfg)
+	// Create a credentials provider
+	creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)
+
+	cfg, err := awsconfig.LoadDefaultConfig(ctx,
+		awsconfig.WithCredentialsProvider(creds),
+		awsconfig.WithRegion("pikpak"))
 	if err != nil {
 		return
 	}
-	partSize := chunksize.Calculator(name, size, s3manager.MaxUploadParts, f.opt.ChunkSize)
-
-	// Create an uploader with the session and custom options
-	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
+		o.BaseEndpoint = aws.String("https://mypikpak.com/")
+	})
+	partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)
+
+	// Create an uploader with custom options
+	uploader := manager.NewUploader(client, func(u *manager.Uploader) {
 		u.PartSize = int64(partSize)
 		u.Concurrency = f.opt.UploadConcurrency
 	})
-	// Upload input parameters
-	uParams := &s3manager.UploadInput{
+	// Perform an upload
+	_, err = uploader.Upload(ctx, &s3.PutObjectInput{
 		Bucket: &p.Bucket,
 		Key:    &p.Key,
 		Body:   in,
-	}
-	// Perform an upload
-	_, err = uploader.UploadWithContext(ctx, uParams)
+	})
 	return
 }
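For reference, a minimal standalone sketch of the same v1-to-v2 migration pattern, since the hunk above elides the import changes. The endpoint URL and credential fields come from the diff; the bucket, key, file name, and part size are illustrative placeholders, not values from the commit:

	package main

	import (
		"context"
		"os"

		"github.com/aws/aws-sdk-go-v2/aws"
		awsconfig "github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/credentials"
		"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
		"github.com/aws/aws-sdk-go-v2/service/s3"
	)

	func main() {
		ctx := context.Background()

		// Static credentials as handed out by the resumable-upload API
		creds := credentials.NewStaticCredentialsProvider("KEY", "SECRET", "TOKEN")

		cfg, err := awsconfig.LoadDefaultConfig(ctx,
			awsconfig.WithCredentialsProvider(creds),
			awsconfig.WithRegion("pikpak"))
		if err != nil {
			panic(err)
		}

		// In v2 the endpoint moves from aws.Config to the service client options
		client := s3.NewFromConfig(cfg, func(o *s3.Options) {
			o.BaseEndpoint = aws.String("https://mypikpak.com/")
		})

		f, err := os.Open("file.bin") // hypothetical input file
		if err != nil {
			panic(err)
		}
		defer f.Close()

		// manager.Uploader replaces s3manager.Uploader; Upload takes a context directly
		uploader := manager.NewUploader(client, func(u *manager.Uploader) {
			u.PartSize = 8 << 20 // 8 MiB parts, illustrative
			u.Concurrency = 4
		})
		if _, err = uploader.Upload(ctx, &s3.PutObjectInput{
			Bucket: aws.String("bucket"),
			Key:    aws.String("key"),
			Body:   f,
		}); err != nil {
			panic(err)
		}
	}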
@@ -1252,6 +1250,12 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 		return nil, fmt.Errorf("invalid response: %+v", new)
 	} else if new.File.Phase == api.PhaseTypeComplete {
 		// early return; in case of zero-byte objects
+		if acc, ok := in.(*accounting.Account); ok && acc != nil {
+			// if `in io.Reader` is still of type `*accounting.Account` (meaning that it is unused)
+			// it is considered a server side copy as no incoming/outgoing traffic occurs at all
+			acc.ServerSideTransferStart()
+			acc.ServerSideCopyEnd(size)
+		}
 		return new.File, nil
 	}
@@ -1700,19 +1704,30 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
 	}

 	// Calculate gcid; grabbed from package jottacloud
-	var gcid string
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
 	var wrap accounting.WrapFn
 	in, wrap = accounting.UnWrap(in)
-	var cleanup func()
-	gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold))
-	defer cleanup()
-	if err != nil {
-		return fmt.Errorf("failed to calculate gcid: %w", err)
+	gcid, err := o.fs.getGcid(ctx, src)
+	if err != nil || gcid == "" {
+		fs.Debugf(o, "calculating gcid: %v", err)
+		if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
+			// No buffering; directly calculate gcid from source
+			rc, err := srcObj.Open(ctx)
+			if err != nil {
+				return fmt.Errorf("failed to open src: %w", err)
+			}
+			defer fs.CheckClose(rc, &err)
+
+			if gcid, err = calcGcid(rc, srcObj.Size()); err != nil {
+				return fmt.Errorf("failed to calculate gcid: %w", err)
+			}
+		} else {
+			var cleanup func()
+			gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold))
+			defer cleanup()
+			if err != nil {
+				return fmt.Errorf("failed to calculate gcid: %w", err)
+			}
+		}
 	}
 	// Wrap the accounting back onto the stream
 	in = wrap(in)
 	fs.Debugf(o, "gcid = %s", gcid)

 	if !withTemp {
 		info, err := o.fs.upload(ctx, in, leaf, dirID, gcid, size, options...)
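The unwrap/rewrap dance above matters because hashing may pre-read the stream before the real upload starts, which would otherwise be double-counted in rclone's transfer stats. A compact sketch of the idiom, using rclone's real accounting.UnWrap helper but a simplified in-memory buffer where readGcid would spill to disk above a memory threshold:

	package main

	import (
		"bytes"
		"crypto/sha1"
		"fmt"
		"io"

		"github.com/rclone/rclone/fs/accounting"
	)

	// hashThenUpload sketches the unwrap-hash-rewrap idiom: strip the
	// accounting wrapper, pre-read the stream to hash it, then rewrap so
	// only the real upload counts as transfer progress. upload is a
	// hypothetical stand-in for the backend's upload call.
	func hashThenUpload(in io.Reader, upload func(io.Reader) error) error {
		unwrapped, wrap := accounting.UnWrap(in)

		var buf bytes.Buffer
		hasher := sha1.New()
		// Read once into a buffer while hashing
		if _, err := io.Copy(io.MultiWriter(&buf, hasher), unwrapped); err != nil {
			return err
		}
		fmt.Printf("hash = %x\n", hasher.Sum(nil))

		// Put the accounting back on so the actual upload is counted
		return upload(wrap(&buf))
	}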

backend/pixeldrain/api_client.go (new file, 397 lines)

@@ -0,0 +1,397 @@
package pixeldrain

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/rest"
)

// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
	Path      []FilesystemNode `json:"path"`
	BaseIndex int              `json:"base_index"`
	Children  []FilesystemNode `json:"children"`
}

// Base returns the base node of the path, this is the node that the path points
// to
func (fsp *FilesystemPath) Base() FilesystemNode {
	return fsp.Path[fsp.BaseIndex]
}

// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as response from update
// commands, if requested
type FilesystemNode struct {
	Type      string    `json:"type"`
	Path      string    `json:"path"`
	Name      string    `json:"name"`
	Created   time.Time `json:"created"`
	Modified  time.Time `json:"modified"`
	ModeOctal string    `json:"mode_octal"`

	// File params
	FileSize  int64  `json:"file_size"`
	FileType  string `json:"file_type"`
	SHA256Sum string `json:"sha256_sum"`

	// ID is only filled in when the file/directory is publicly shared
	ID string `json:"id,omitempty"`
}

// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry

// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory, and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
	Time    time.Time `json:"time"`
	Path    string    `json:"path"`
	PathNew string    `json:"path_new"`
	Action  string    `json:"action"`
	Type    string    `json:"type"`
}

// UserInfo contains information about the logged in user
type UserInfo struct {
	Username         string           `json:"username"`
	Subscription     SubscriptionType `json:"subscription"`
	StorageSpaceUsed int64            `json:"storage_space_used"`
}

// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription. Like the
// perks and cost
type SubscriptionType struct {
	Name         string `json:"name"`
	StorageSpace int64  `json:"storage_space"`
}

// APIError is the error type returned by the pixeldrain API
type APIError struct {
	StatusCode string `json:"value"`
	Message    string `json:"message"`
}

func (e APIError) Error() string { return e.StatusCode }

// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
	errNotFound             = errors.New("pd api: path not found")
	errExists               = errors.New("pd api: node already exists")
	errAuthenticationFailed = errors.New("pd api: authentication failed")
)

func apiErrorHandler(resp *http.Response) (err error) {
	var e APIError
	if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
		return fmt.Errorf("failed to parse error json: %w", err)
	}

	// We close the body here so that the API handlers can be sure that the
	// response body is not still open when an error was returned
	if err = resp.Body.Close(); err != nil {
		return fmt.Errorf("failed to close resp body: %w", err)
	}

	if e.StatusCode == "path_not_found" {
		return errNotFound
	} else if e.StatusCode == "directory_not_empty" {
		return fs.ErrorDirectoryNotEmpty
	} else if e.StatusCode == "node_already_exists" {
		return errExists
	} else if e.StatusCode == "authentication_failed" {
		return errAuthenticationFailed
	} else if e.StatusCode == "permission_denied" {
		return fs.ErrorPermissionDenied
	}

	return e
}

var retryErrorCodes = []int{
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
}

// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
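To make the error translation concrete, here is a hedged, test-style sketch (not part of the commit) that would live in the same package: a "path_not_found" body decodes into APIError and comes back as the errNotFound sentinel, which List and NewObject below then map onto fs.ErrorDirNotFound / fs.ErrorObjectNotFound. It assumes the imports errors, fmt, io, net/http and strings:

	// In-package sketch of the sentinel-error flow through apiErrorHandler.
	func ExampleAPIErrorTranslation() {
		resp := &http.Response{
			// Fake response body shaped like a pixeldrain API error
			Body: io.NopCloser(strings.NewReader(
				`{"value":"path_not_found","message":"the path does not exist"}`)),
		}
		err := apiErrorHandler(resp)
		fmt.Println(errors.Is(err, errNotFound))
		// Output: true
	}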
// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
	params = make(url.Values)

	if modified, ok := meta["mtime"]; ok {
		params.Set("modified", modified)
	}
	if created, ok := meta["btime"]; ok {
		params.Set("created", created)
	}
	if mode, ok := meta["mode"]; ok {
		params.Set("mode", mode)
	}
	if shared, ok := meta["shared"]; ok {
		params.Set("shared", shared)
	}
	if loggingEnabled, ok := meta["logging_enabled"]; ok {
		params.Set("logging_enabled", loggingEnabled)
	}

	return params
}
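A quick in-package illustration of the mapping (hypothetical values; the keys are the ones handled above, and "mtime" is renamed to "modified" on the wire):

	func ExampleParamsFromMetadata() {
		meta := fs.Metadata{
			"mtime": "2024-01-02T15:04:05.999999999Z",
			"mode":  "755",
		}
		// Encode sorts keys, so "mode" comes before "modified"
		fmt.Println(paramsFromMetadata(meta).Encode())
		// Output: mode=755&modified=2024-01-02T15%3A04%3A05.999999999Z
	}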
// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
	// Trim the path prefix. The path prefix is hidden from rclone during all
	// operations. Saving it here would confuse rclone a lot. So instead we
	// strip it here and add it back for every API request we need to perform
	node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
	return &Object{fs: f, base: node}
}

func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
	return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}

func (f *Fs) escapePath(p string) (out string) {
	// Add the path prefix, encode all the parts and combine them together
	var parts = strings.Split(f.pathPrefix+p, "/")
	for i := range parts {
		parts[i] = url.PathEscape(parts[i])
	}
	return strings.Join(parts, "/")
}
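Escaping segment by segment keeps the directory separators intact while protecting special characters in names. A small in-package sketch with a hypothetical path (escapePath only touches f.pathPrefix, so a zero-value Fs suffices):

	func ExampleEscapePath() {
		f := &Fs{pathPrefix: "/me/"}
		// The space and hash are escaped, the slashes survive
		fmt.Println(f.escapePath("docs/my file#1.txt"))
		// Output: /me/docs/my%20file%231.txt
	}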
func (f *Fs) put(
	ctx context.Context,
	path string,
	body io.Reader,
	meta fs.Metadata,
	options []fs.OpenOption,
) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(meta)

	// Tell the server to automatically create parent directories if they don't
	// exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "PUT",
				Path:       f.escapePath(path),
				Body:       body,
				Parameters: params,
				Options:    options,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &rest.Opts{
			Method:  "GET",
			Path:    f.escapePath(path),
			Options: options,
		})
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}

func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
	return fsp, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(path),
				// To receive node info from the pixeldrain API you need to add the
				// ?stat query. Without it pixeldrain will return the file contents
				// if the URL points to a file
				Parameters: url.Values{"stat": []string{""}},
			},
			nil,
			&fsp,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
	return changeLog, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(""),
				Parameters: url.Values{
					"change_log": []string{""},
					"start":      []string{start.Format(time.RFC3339Nano)},
					"end":        []string{end.Format(time.RFC3339Nano)},
				},
			},
			nil,
			&changeLog,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(fields)
	params.Set("action", "update")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(path),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(dir),
				MultipartParams: url.Values{"action": []string{"mkdirall"}},
				NoResponse:      true,
			},
			nil,
			nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")

// Renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		// This is not a pixeldrain FS, can't move
		return node, errIncompatibleSourceFS
	} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
		// Path is not in the same root dir, can't move
		return node, errIncompatibleSourceFS
	}

	var params = paramsFromMetadata(meta)
	params.Set("action", "rename")

	// The target is always in our own filesystem so here we use our
	// own pathPrefix
	params.Set("target", f.pathPrefix+to)

	// Create parent directories if the parent directory of the file
	// does not exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "POST",
				// Important: We use the source FS path prefix here
				Path:            srcFs.escapePath(from),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
	var params url.Values
	if recursive {
		// Tell the server to recursively delete all child files
		params = url.Values{"recursive": []string{"true"}}
	}

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "DELETE",
				Path:       f.escapePath(path),
				Parameters: params,
				NoResponse: true,
			},
			nil, nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}

func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
	return user, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				// The default RootURL points at the filesystem endpoint. We can't
				// use that to request user information. So here we override it to
				// the user endpoint
				RootURL: f.opt.APIURL + "/user",
			},
			nil,
			&user,
		)
		return shouldRetry(ctx, resp, err)
	})
}

backend/pixeldrain/pixeldrain.go (new file, 567 lines)

@@ -0,0 +1,567 @@
// Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	timeFormat    = time.RFC3339Nano
	minSleep      = pacer.MinSleep(10 * time.Millisecond)
	maxSleep      = pacer.MaxSleep(1 * time.Second)
	decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pixeldrain",
		Description: "Pixeldrain Filesystem",
		NewFs:       NewFs,
		Config:      nil,
		Options: []fs.Option{{
			Name: "api_key",
			Help: "API key for your pixeldrain account.\n" +
				"Found on https://pixeldrain.com/user/api_keys.",
			Sensitive: true,
		}, {
			Name: "root_folder_id",
			Help: "Root of the filesystem to use.\n\n" +
				"Set to 'me' to use your personal filesystem. " +
				"Set to a shared directory ID to use a shared directory.",
			Default: "me",
		}, {
			Name: "api_url",
			Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
				"this at default. It is only intended to be changed for testing purposes.",
			Default:  "https://pixeldrain.com/api",
			Advanced: true,
			Required: true,
		}},
		MetadataInfo: &fs.MetadataInfo{
			System: map[string]fs.MetadataHelp{
				"mode": {
					Help:    "File mode",
					Type:    "octal, unix style",
					Example: "755",
				},
				"mtime": {
					Help:    "Time of last modification",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
				"btime": {
					Help:    "Time of file birth (creation)",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
			},
			Help: "Pixeldrain supports file modes and creation times.",
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`
	RootFolderID string `config:"root_folder_id"`
	APIURL       string `config:"api_url"`
}

// Fs represents a remote pixeldrain filesystem
type Fs struct {
	name     string       // name of this remote, as given to NewFs
	root     string       // the path we are working on, as given to NewFs
	opt      Options      // parsed options
	features *fs.Features // optional features
	srv      *rest.Client // the connection to the server
	pacer    *fs.Pacer
	loggedIn bool // if the user is authenticated

	// pathPrefix is the directory we're working in. The pathPrefix is stripped
	// from every API response containing a path. The pathPrefix always begins
	// and ends with a slash for concatenation convenience
	pathPrefix string
}

// Object describes a pixeldrain file
type Object struct {
	fs   *Fs            // what this object is part of
	base FilesystemNode // the node this object references
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
	}
	f.features = (&fs.Features{
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ReadMetadata:            true,
		WriteMetadata:           true,
	}).Fill(ctx, f)

	// Set the path prefix. This is the path to the root directory on the
	// server. We add it to each request and strip it from each response because
	// rclone does not want to see it
	f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"

	// The root URL equates to https://pixeldrain.com/api/filesystem during
	// normal operation. API handlers need to manually add the pathPrefix to
	// each request
	f.srv.SetRoot(opt.APIURL + "/filesystem")

	// If using an APIKey, set the Authorization header
	if len(opt.APIKey) > 0 {
		f.srv.SetUserPass("", opt.APIKey)

		// Check if credentials are correct
		user, err := f.userInfo(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get user data: %w", err)
		}

		f.loggedIn = true

		fs.Infof(f,
			"Logged in as '%s', subscription '%s', storage limit %d",
			user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
		)
	}

	if !f.loggedIn && opt.RootFolderID == "me" {
		return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
	}

	// Satisfy TestFsIsFile. This test expects that we throw an error if the
	// filesystem root is a file
	fsp, err := f.stat(ctx, "")
	if err != errNotFound && err != nil {
		// It doesn't matter if the root directory does not exist, as long as it
		// is not a file. This is what the test dictates
		return f, err
	} else if err == nil && fsp.Base().Type == "file" {
		// The filesystem root is a file, rclone wants us to set the root to the
		// parent directory
		f.root = path.Dir(f.root)
		f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fsp, err := f.stat(ctx, dir)
	if err == errNotFound {
		return nil, fs.ErrorDirNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "file" {
		return nil, fs.ErrorIsFile
	}

	entries = make(fs.DirEntries, len(fsp.Children))
	for i := range fsp.Children {
		if fsp.Children[i].Type == "dir" {
			entries[i] = f.nodeToDirectory(fsp.Children[i])
		} else {
			entries[i] = f.nodeToObject(fsp.Children[i])
		}
	}

	return entries, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	fsp, err := f.stat(ctx, remote)
	if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	} else if err != nil {
		return nil, err
	} else if fsp.Base().Type == "dir" {
		return nil, fs.ErrorIsDir
	}
	return f.nodeToObject(fsp.Base()), nil
}

// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to get object metadata: %w", err)
	}

	// Set the mtime if it was not already set in the metadata
	if _, ok := meta["mtime"]; !ok {
		if meta == nil {
			meta = make(fs.Metadata)
		}
		meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
	}

	node, err := f.put(ctx, src.Remote(), in, meta, options)
	if err != nil {
		return nil, fmt.Errorf("failed to put object: %w", err)
	}

	return f.nodeToObject(node), nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	err = f.mkdir(ctx, dir)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		// Spec says we do not return an error if the directory already exists
		return nil
	}
	return err
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, false)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
	err = f.delete(ctx, dir, true)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		// This is not a pixeldrain object. Can't move
		return nil, fs.ErrorCantMove
	}

	node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
	if err == errIncompatibleSourceFS {
		return nil, fs.ErrorCantMove
	} else if err == errNotFound {
		return nil, fs.ErrorObjectNotFound
	}

	return f.nodeToObject(node), nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
	if err == errIncompatibleSourceFS {
		return fs.ErrorCantDirMove
	} else if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		return fs.ErrorDirExists
	}
	return err
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	// If the bucket ID is not /me we need to explicitly enable change logging
	// for this directory or file
	if f.pathPrefix != "/me/" {
		_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
		if err != nil {
			fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
		}
	}

	go f.changeNotify(ctx, notify, newInterval)
}

func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	var ticker = time.NewTicker(<-newInterval)
	var lastPoll = time.Now()

	for {
		select {
		case dur, ok := <-newInterval:
			if !ok {
				ticker.Stop()
				return
			}

			fs.Debugf(f, "Polling changes at an interval of %s", dur)
			ticker.Reset(dur)

		case t := <-ticker.C:
			clog, err := f.changeLog(ctx, lastPoll, t)
			if err != nil {
				fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
				continue
			}

			for i := range clog {
				fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
					clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)

				if clog[i].Type == "dir" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
				} else if clog[i].Type == "file" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
				}
			}

			lastPoll = t
		}
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put already supports streaming so we just use that
	return f.Put(ctx, in, src, options...)
}

// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
	_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	return err
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
	if err != nil {
		return "", err
	}
	if fsn.ID != "" {
		return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
	}
	return "", nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	user, err := f.userInfo(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}

	usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}

	if user.Subscription.StorageSpace > -1 {
		usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
	}

	return usage, nil
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	if err == nil {
		o.base.Modified = modTime
	}
	return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	return o.fs.read(ctx, o.base.Path, options)
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// Copy the parameters and update the object
	o.base.Modified = src.ModTime(ctx)
	o.base.FileSize = src.Size()
	o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
	_, err = o.fs.Put(ctx, in, o, options...)
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.delete(ctx, o.base.Path, false)
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA256 {
		return "", hash.ErrUnsupported
	}
	return o.base.SHA256Sum, nil
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.base.Path
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.base.Path
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.base.Modified
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.base.FileSize
}

// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
	return o.base.FileType
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	return fs.Metadata{
		"mode":  o.base.ModeOctal,
		"mtime": o.base.Modified.Format(timeFormat),
		"btime": o.base.Created.Format(timeFormat),
	}, nil
}

// Verify that all the interfaces are implemented correctly
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Info           = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.PutStreamer    = (*Fs)(nil)
	_ fs.DirSetModTimer = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
	_ fs.DirEntry       = (*Object)(nil)
	_ fs.MimeTyper      = (*Object)(nil)
	_ fs.Metadataer     = (*Object)(nil)
)

backend/pixeldrain/pixeldrain_test.go (new file, 18 lines)

@@ -0,0 +1,18 @@
// Test pixeldrain filesystem interface
package pixeldrain_test

import (
	"testing"

	"github.com/rclone/rclone/backend/pixeldrain"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestPixeldrain:",
		NilObject:       (*pixeldrain.Object)(nil),
		SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
	})
}
@@ -13,7 +13,8 @@ import (
 	"reflect"
 	"strings"

-	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 )

 // flags

@@ -82,15 +83,18 @@ func main() {

 package s3

-import "github.com/aws/aws-sdk-go/service/s3"
+import (
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
 `)

 	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
 	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
 	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
-	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
+	genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
 	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
-	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
+	genSetFrom(new(types.Object), new(types.ObjectVersion))
 	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
 	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
 	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))

backend/s3/s3.go (1030 lines changed; diff suppressed because it is too large)
@@ -5,15 +5,17 @@ import (
 	"compress/gzip"
 	"context"
 	"crypto/md5"
+	"errors"
 	"fmt"
 	"path"
 	"strings"
 	"testing"
 	"time"

-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/hash"

@@ -58,7 +60,17 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 		// "tier" - read only
 		// "btime" - read only
 	}
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
+	// Cloudflare insists on decompressing `Content-Encoding: gzip` unless
+	// `Cache-Control: no-transform` is supplied. This is a deviation from
+	// AWS but we fudge the tests here rather than breaking people's
+	// expectations of what Cloudflare does.
+	//
+	// This can always be overridden by using
+	// `--header-upload "Cache-Control: no-transform"`
+	if f.opt.Provider == "Cloudflare" {
+		metadata["cache-control"] = "no-transform"
+	}
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata)
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()

@@ -131,20 +143,20 @@ func TestVersionLess(t *testing.T) {
 	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
 	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
 	for n, test := range []struct {
-		a, b *s3.ObjectVersion
+		a, b *types.ObjectVersion
 		want bool
 	}{
 		{a: nil, b: nil, want: true},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
-		{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
-		{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
-		{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
+		{a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
+		{a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
+		{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
 	} {
 		got := versionLess(test.a, test.b)
 		assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))

@@ -157,24 +169,24 @@ func TestMergeDeleteMarkers(t *testing.T) {
 	t1 := fstest.Time("2022-01-21T12:00:00+01:00")
 	t2 := fstest.Time("2022-01-21T12:00:01+01:00")
 	for n, test := range []struct {
-		versions []*s3.ObjectVersion
-		markers  []*s3.DeleteMarkerEntry
-		want     []*s3.ObjectVersion
+		versions []types.ObjectVersion
+		markers  []types.DeleteMarkerEntry
+		want     []types.ObjectVersion
 	}{
 		{
-			versions: []*s3.ObjectVersion{},
-			markers:  []*s3.DeleteMarkerEntry{},
-			want:     []*s3.ObjectVersion{},
+			versions: []types.ObjectVersion{},
+			markers:  []types.DeleteMarkerEntry{},
+			want:     []types.ObjectVersion{},
 		},
 		{
-			versions: []*s3.ObjectVersion{
+			versions: []types.ObjectVersion{
 				{
 					Key:          &key1,
 					LastModified: &t1,
 				},
 			},
-			markers: []*s3.DeleteMarkerEntry{},
-			want: []*s3.ObjectVersion{
+			markers: []types.DeleteMarkerEntry{},
+			want: []types.ObjectVersion{
 				{
 					Key:          &key1,
 					LastModified: &t1,

@@ -182,14 +194,14 @@ func TestMergeDeleteMarkers(t *testing.T) {
 			},
 		},
 		{
-			versions: []*s3.ObjectVersion{},
-			markers: []*s3.DeleteMarkerEntry{
+			versions: []types.ObjectVersion{},
+			markers: []types.DeleteMarkerEntry{
 				{
 					Key:          &key1,
 					LastModified: &t1,
 				},
 			},
-			want: []*s3.ObjectVersion{
+			want: []types.ObjectVersion{
 				{
 					Key:          &key1,
 					LastModified: &t1,

@@ -198,7 +210,7 @@ func TestMergeDeleteMarkers(t *testing.T) {
 			},
 		},
 		{
-			versions: []*s3.ObjectVersion{
+			versions: []types.ObjectVersion{
 				{
 					Key:          &key1,
 					LastModified: &t2,

@@ -208,13 +220,13 @@ func TestMergeDeleteMarkers(t *testing.T) {
 					LastModified: &t2,
 				},
 			},
-			markers: []*s3.DeleteMarkerEntry{
+			markers: []types.DeleteMarkerEntry{
 				{
 					Key:          &key1,
 					LastModified: &t1,
 				},
 			},
-			want: []*s3.ObjectVersion{
+			want: []types.ObjectVersion{
 				{
 					Key:          &key1,
 					LastModified: &t2,

@@ -399,22 +411,23 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	// quirk is set correctly
 	req := s3.CreateBucketInput{
 		Bucket: &f.rootBucket,
-		ACL:    stringPointerOrNil(f.opt.BucketACL),
+		ACL:    types.BucketCannedACL(f.opt.BucketACL),
 	}
 	if f.opt.LocationConstraint != "" {
-		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
-			LocationConstraint: &f.opt.LocationConstraint,
+		req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
+			LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint),
 		}
 	}
 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.c.CreateBucketWithContext(ctx, &req)
+		_, err := f.c.CreateBucket(ctx, &req)
 		return f.shouldRetry(ctx, err)
 	})
 	var errString string
+	var awsError smithy.APIError
 	if err == nil {
 		errString = "No Error"
-	} else if awsErr, ok := err.(awserr.Error); ok {
-		errString = awsErr.Code()
+	} else if errors.As(err, &awsError) {
+		errString = awsError.ErrorCode()
 	} else {
 		assert.Fail(t, "Unknown error %T %v", err, err)
 	}
@@ -4,12 +4,14 @@ package s3

 import (
 	"context"
 	"net/http"
 	"strings"
 	"testing"

+	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {

@@ -54,20 +56,16 @@ func TestAWSDualStackOption(t *testing.T) {
 	{
 		// test enabled
 		ctx, opt, client := SetupS3Test(t)
 		opt.UseDualStack = true
-		s3Conn, _, _ := s3Connection(ctx, opt, client)
-		if !strings.Contains(s3Conn.Endpoint, "dualstack") {
-			t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
-			t.Fail()
-		}
+		s3Conn, err := s3Connection(ctx, opt, client)
+		require.NoError(t, err)
+		assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
 	}
 	{
 		// test default case
 		ctx, opt, client := SetupS3Test(t)
-		s3Conn, _, _ := s3Connection(ctx, opt, client)
-		if strings.Contains(s3Conn.Endpoint, "dualstack") {
-			t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
-			t.Fail()
-		}
+		s3Conn, err := s3Connection(ctx, opt, client)
+		require.NoError(t, err)
+		assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
 	}
 }
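The rewritten test reflects a real API difference: in SDK v2, dual-stack is a typed endpoint option rather than a "dualstack" substring in the endpoint URL. A hedged standalone sketch of how a client would opt in outside of rclone (the option and field names are from the v2 SDK; nothing here is rclone code):

	package main

	import (
		"context"
		"fmt"

		"github.com/aws/aws-sdk-go-v2/aws"
		awsconfig "github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
	)

	func main() {
		// Request dual-stack (IPv4/IPv6) endpoints at config load time
		cfg, err := awsconfig.LoadDefaultConfig(context.Background(),
			awsconfig.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled))
		if err != nil {
			panic(err)
		}
		client := s3.NewFromConfig(cfg)

		// The setting is inspectable on the client options, which is
		// exactly what the rewritten test asserts
		fmt.Println(client.Options().EndpointOptions.UseDualStackEndpoint ==
			aws.DualStackEndpointStateEnabled)
	}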
@@ -2,7 +2,10 @@

 package s3

-import "github.com/aws/aws-sdk-go/service/s3"
+import (
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)

 // setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
 func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {

@@ -27,6 +30,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
 	a.Name = b.Name
 	a.Prefix = b.Prefix
 	a.RequestCharged = b.RequestCharged
+	a.ResultMetadata = b.ResultMetadata
 }

 // setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b

@@ -41,8 +45,8 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
 	a.RequestPayer = b.RequestPayer
 }

-// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
-func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
+// setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b
+func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) {
 	a.IsLatest = b.IsLatest
 	a.Key = b.Key
 	a.LastModified = b.LastModified

@@ -60,10 +64,11 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
 	a.Name = b.Name
 	a.Prefix = b.Prefix
 	a.RequestCharged = b.RequestCharged
+	a.ResultMetadata = b.ResultMetadata
 }

-// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
-func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
+// setFrom_typesObject_typesObjectVersion copies matching elements from a to b
+func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm
 	a.ETag = b.ETag
 	a.Key = b.Key

@@ -71,7 +76,6 @@ func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
 	a.Owner = b.Owner
 	a.RestoreStatus = b.RestoreStatus
 	a.Size = b.Size
-	a.StorageClass = b.StorageClass
 }

 // setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b

@@ -82,6 +86,7 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa
 	a.ContentEncoding = b.ContentEncoding
 	a.ContentLanguage = b.ContentLanguage
 	a.ContentType = b.ContentType
+	a.Expires = b.Expires
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode

@@ -96,8 +101,9 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa

 // setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
 func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -111,7 +117,6 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode

@@ -132,6 +137,7 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
 func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
 	a.Bucket = b.Bucket
 	a.CopySource = b.CopySource
+	a.Key = b.Key
 	a.CopySourceIfMatch = b.CopySourceIfMatch
 	a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
 	a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch

@@ -141,7 +147,6 @@ func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput,
 	a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
 	a.ExpectedBucketOwner = b.ExpectedBucketOwner
 	a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
-	a.Key = b.Key
 	a.RequestPayer = b.RequestPayer
 	a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
 	a.SSECustomerKey = b.SSECustomerKey

@@ -166,6 +171,7 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
 	a.ETag = b.ETag
 	a.Expiration = b.Expiration
 	a.Expires = b.Expires
+	a.ExpiresString = b.ExpiresString
 	a.LastModified = b.LastModified
 	a.Metadata = b.Metadata
 	a.MissingMeta = b.MissingMeta

@@ -183,12 +189,14 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
 	a.StorageClass = b.StorageClass
 	a.VersionId = b.VersionId
 	a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
+	a.ResultMetadata = b.ResultMetadata
 }

 // setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
 func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -202,7 +210,6 @@ func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipart
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode

@@ -232,6 +239,7 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
 	a.ContentLanguage = b.ContentLanguage
 	a.ContentLength = b.ContentLength
 	a.ContentType = b.ContentType
+	a.Expires = b.Expires
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode

@@ -246,8 +254,9 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P

 // setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
 func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
-	a.ACL = b.ACL
 	a.Bucket = b.Bucket
+	a.Key = b.Key
+	a.ACL = b.ACL
 	a.BucketKeyEnabled = b.BucketKeyEnabled
 	a.CacheControl = b.CacheControl
 	a.ChecksumAlgorithm = b.ChecksumAlgorithm

@@ -261,7 +270,6 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
 	a.GrantRead = b.GrantRead
 	a.GrantReadACP = b.GrantReadACP
 	a.GrantWriteACP = b.GrantWriteACP
-	a.Key = b.Key
 	a.Metadata = b.Metadata
 	a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
 	a.ObjectLockMode = b.ObjectLockMode
@@ -3,6 +3,7 @@
package s3

import (
"context"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
@@ -10,6 +11,9 @@ import (
"sort"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// URL parameters that need to be added to the signature
@@ -36,10 +40,17 @@ var s3ParamsToSign = map[string]struct{}{
"response-content-encoding": {},
}

// sign signs requests using v2 auth
// Implement HTTPSignerV4 interface
type v2Signer struct {
opt *Options
}

// SignHTTP signs requests using v2 auth.
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Cobbled together from goamz and aws-sdk-go.
//
// Bodged up to compile with AWS SDK v2
func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
@@ -107,11 +118,12 @@ func sign(AccessKey, SecretKey string, req *http.Request) {

// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))

// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature))
return nil
}

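For context, the AWS SDK for Go v2 lets an S3 client accept any implementation of its HTTPSignerV4 interface, which is what makes the v2Signer above pluggable. A minimal sketch of the wiring inside the backend package, assuming cfg is an already-loaded aws.Config and opt is this backend's *Options:

    package s3

    import (
        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // newS3ClientWithV2Signing swaps the SDK's default SigV4 signer for the
    // legacy v2 signer defined above.
    func newS3ClientWithV2Signing(cfg aws.Config, opt *Options) *s3.Client {
        return s3.NewFromConfig(cfg, func(o *s3.Options) {
            o.HTTPSignerV4 = &v2Signer{opt: opt}
        })
    }
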
@@ -62,7 +62,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
// This is only going to be http errors here
return "", fmt.Errorf("failed to authenticate: %w", err)
}
if result.Errors != nil && len(result.Errors) > 0 {
if len(result.Errors) > 0 {
return "", errors.New(strings.Join(result.Errors, ", "))
}
if result.Token == "" {

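The simplified condition relies on a Go guarantee: len of a nil slice is 0, so the explicit nil check was redundant. A minimal demonstration:

    package main

    import "fmt"

    func main() {
        var errs []string                   // nil slice
        fmt.Println(errs == nil, len(errs)) // true 0
        // so `errs != nil && len(errs) > 0` is equivalent to `len(errs) > 0`
    }
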
@@ -75,8 +75,18 @@ func init() {
Help: "SSH password, leave blank to use ssh-agent.",
IsPassword: true,
}, {
Name: "key_pem",
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
Name: "key_pem",
Help: `Raw PEM-encoded private key.

Note that this should be on a single line with line endings replaced with '\n', eg

key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----

This will generate the single line correctly:

awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa

If specified, it will override the key_file parameter.`,
Sensitive: true,
}, {
Name: "key_file",
@@ -334,7 +344,7 @@ cost of using more memory.
Advanced: true,
}, {
Name: "connections",
Help: strings.Replace(`Maximum number of SFTP simultaneous connections, 0 for unlimited.
Help: strings.ReplaceAll(`Maximum number of SFTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.
@@ -348,7 +358,7 @@ maximum of |--checkers| and |--transfers|.
So for |connections 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
`, "|", "`"),
Default: 0,
Advanced: true,
}, {
@@ -561,7 +571,7 @@ type Object struct {
fs *Fs
remote string
size int64 // size of the object
modTime time.Time // modification time of the object
modTime uint32 // modification time of the object as unix time
mode os.FileMode // mode bits from the file
md5sum *string // Cached MD5 checksum
sha1sum *string // Cached SHA1 checksum
@@ -815,13 +825,13 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
if cErr := c.closed(); cErr == nil {
cErr = c.close()
if cErr != nil {
err = cErr
fs.Debugf(f, "Ignoring error closing connection: %v", cErr)
}
}
f.pool[i] = nil
}
f.pool = nil
return err
return nil
}

// NewFs creates a new Fs object from the name and root. It connects to
@@ -1957,7 +1967,7 @@ func (o *Object) Size() int64 {

// ModTime returns the modification time of the remote sftp file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
return time.Unix(int64(o.modTime), 0)
}

// path returns the native SFTP path of the object
@@ -1972,7 +1982,7 @@ func (o *Object) shellPath() string {

// setMetadata updates the info in the object from the stat result passed in
func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.modTime = info.Sys().(*sftp.FileStat).Mtime
o.size = info.Size()
o.mode = info.Mode()
}
@@ -2195,7 +2205,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// In the specific case of o.fs.opt.SetModTime == false
// if the object wasn't found then don't return an error
fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
o.modTime = src.ModTime(ctx)
o.modTime = uint32(src.ModTime(ctx).Unix())
o.size = src.Size()
o.mode = os.FileMode(0666) // regular file
} else if err != nil {

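The sftp backend now keeps modification times in the form the SFTP protocol transmits them, unsigned 32-bit unix seconds, and converts to time.Time only when asked. A sketch of the round trip these hunks rely on:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        mtime := uint32(time.Now().Unix()) // as stored on the Object
        t := time.Unix(int64(mtime), 0)    // as returned by ModTime
        fmt.Println(mtime, t.UTC().Format(time.RFC3339))
    }
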
@@ -163,7 +163,7 @@ type BatchUpdateFilePropertiesRequest struct {
// SendFilePayloadResponse represents the JSON API object that's received
// in response to uploading a file's body to the CDN URL.
type SendFilePayloadResponse struct {
Size int `json:"size"`
Size int64 `json:"size"`
ContentType string `json:"contentType"`
Md5 string `json:"md5"`
Message string `json:"message"`

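Widening Size from int to int64 matters on 32-bit builds, where Go's int is 32 bits and a size above 2 GiB would overflow. A small decoding illustration (the JSON payload here is made up):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type sendFilePayloadResponse struct {
        Size int64 `json:"size"`
    }

    func main() {
        var r sendFilePayloadResponse
        _ = json.Unmarshal([]byte(`{"size": 5368709120}`), &r) // 5 GiB, above math.MaxInt32
        fmt.Println(r.Size)
    }
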
@@ -1,5 +1,3 @@
//go:build go1.20

package union

import (

@@ -903,7 +903,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := 0; i < len(opt.Remotes)-1; i++ {
opt.Remotes[i] = opt.Remotes[i] + ":ro"
opt.Remotes[i] += ":ro"
}
opt.Upstreams = opt.Remotes
}

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"runtime"
"testing"
"time"

@@ -95,6 +96,12 @@ func TestMoveCopy(t *testing.T) {
fLocal := unionFs.upstreams[0].Fs
fMemory := unionFs.upstreams[1].Fs

if runtime.GOOS == "darwin" {
// need to disable as this test specifically tests a local that can't Copy
f.Features().Disable("Copy")
fLocal.Features().Disable("Copy")
}

t.Run("Features", func(t *testing.T) {
assert.NotNil(t, f.Features().Move)
assert.Nil(t, f.Features().Copy)

@@ -159,7 +159,9 @@ Set to 0 to disable chunked uploading.
Help: "Exclude ownCloud mounted storages",
Advanced: true,
Default: false,
}},
},
fshttp.UnixSocketConfig,
},
})
}

@@ -177,6 +179,7 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
ExcludeShares bool `config:"owncloud_exclude_shares"`
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
UnixSocket string `config:"unix_socket"`
}

// Fs represents a remote webdav
@@ -458,7 +461,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
precision: fs.ModTimeNotSupported,
}

client := fshttp.NewClient(ctx)
var client *http.Client
if opt.UnixSocket == "" {
client = fshttp.NewClient(ctx)
} else {
client = fshttp.NewClientWithUnixSocket(ctx, opt.UnixSocket)
}
if opt.Vendor == "sharepoint-ntlm" {
// Disable transparent HTTP/2 support as per https://golang.org/pkg/net/http/ ,
// otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED'
@@ -635,7 +643,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies(ctx)
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
fs.Errorf(nil, "could not renew cookies: %s", err.Error())
return
}
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)

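A rough sketch of what a unix-socket HTTP client looks like in plain net/http, presumably the kind of thing fshttp.NewClientWithUnixSocket wraps (the socket path below is hypothetical):

    package main

    import (
        "context"
        "net"
        "net/http"
    )

    func unixClient(socketPath string) *http.Client {
        return &http.Client{
            Transport: &http.Transport{
                // Dial the socket regardless of the host in the URL.
                DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                    var d net.Dialer
                    return d.DialContext(ctx, "unix", socketPath)
                },
            },
        }
    }

    func main() {
        _ = unixClient("/run/webdav.sock") // hypothetical path
    }
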
@@ -296,16 +296,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
//request object meta info
if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {

} else {
if info.ResourceType == "file" {
rootDir := path.Dir(root)
if rootDir == "." {
rootDir = ""
}
f.setRoot(rootDir)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
} else if info.ResourceType == "file" {
rootDir := path.Dir(root)
if rootDir == "." {
rootDir = ""
}
f.setRoot(rootDir)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}

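This yandex rewrite is gocritic's elseif simplification: an else branch whose only statement is an if collapses into else-if with identical behavior. A minimal before/after:

    package main

    import "fmt"

    func classify(n int) string {
        if n < 0 {
            return "negative"
        } else if n == 0 { // was: } else { if n == 0 { ... } }
            return "zero"
        }
        return "positive"
    }

    func main() { fmt.Println(classify(0)) }
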
@@ -73,7 +73,7 @@ var osarches = []string{
"plan9/386",
"plan9/amd64",
"solaris/amd64",
"js/wasm",
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
}

// Special environment flags for a given arch

@@ -41,7 +41,9 @@ docs = [
"combine.md",
"dropbox.md",
"filefabric.md",
"filescom.md",
"ftp.md",
"gofile.md",
"googlecloudstorage.md",
"drive.md",
"googlephotos.md",
@@ -69,6 +71,7 @@ docs = [
"swift.md",
"pcloud.md",
"pikpak.md",
"pixeldrain.md",
"premiumizeme.md",
"protondrive.md",
"putio.md",

78
bin/test_backend_sizes.py
Executable file
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
Test the sizes in the rclone binary of each backend by compiling
rclone with and without the backend and measuring the difference.

Run with no arguments to test all backends or supply a list of
backends to test.
"""

all_backends = "backend/all/all.go"

# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

import os
import re
import sys
import subprocess

match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')

def read_backends():
    """
    Reads the backends file, returning a list of backends and the original file
    """
    with open(all_backends) as fd:
        orig_all = fd.read()
    # find the backends
    backends = []
    for line in orig_all.split("\n"):
        match = match_backend.search(line)
        if match:
            backends.append(match.group(1))
    return backends, orig_all

def write_all(orig_all, backend):
    """
    Write the all backends file without the backend given
    """
    with open(all_backends, "w") as fd:
        for line in orig_all.split("\n"):
            match = re.search(r'"github.com/rclone/rclone/backend/(.*?)"', line)
            # Comment out line matching backend
            if match and match.group(1) == backend:
                line = "// " + line
            fd.write(line+"\n")

def compile():
    """
    Compile the binary, returning the size
    """
    subprocess.check_call(compile_command)
    return os.stat("rclone").st_size

def main():
    # change directory to the one with this script in
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # change directory to the main rclone source
    os.chdir("..")

    to_test = sys.argv[1:]
    backends, orig_all = read_backends()
    if len(to_test) == 0:
        to_test = backends
    # Compile with all backends
    ref = compile()
    print(f"Total binary size {ref/1024/1024:.3f} MiB")
    print("Backend,Size MiB")
    for test_backend in to_test:
        write_all(orig_all, test_backend)
        new_size = compile()
        print(f"{test_backend},{(ref-new_size)/1024/1024:.3f}")
    # restore all file
    with open(all_backends, "w") as fd:
        fd.write(orig_all)

if __name__ == "__main__":
    main()

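In use, the new script can be run from anywhere in the repository, either with no arguments to measure every backend or with a selection, for example bin/test_backend_sizes.py dropbox s3; as the print calls above show, it emits a total-size header followed by one backend,size-in-MiB CSV row per backend.
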
@@ -46,8 +46,7 @@ func printValue(what string, uv *int64, isSize bool) {
var commandDefinition = &cobra.Command{
Use: "about remote:",
Short: `Get quota information from the remote.`,
Long: `
` + "`rclone about`" + ` prints quota information about a remote to standard
Long: `Prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from ` + "`rclone about remote:`" + ` is:

@@ -25,8 +25,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `
Remote authorization. Used to authorize a remote or headless
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.


@@ -31,8 +31,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "backend <command> remote:path [opts] <args>",
Short: `Run a backend-specific command.`,
Long: `
This runs a backend-specific command. The commands themselves (except
Long: `This runs a backend-specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.


@@ -1304,7 +1304,7 @@ func touchFiles(ctx context.Context, dateStr string, f fs.Fs, dir, glob string)
return files, fmt.Errorf("invalid date %q: %w", dateStr, err)
}

matcher, firstErr := filter.GlobToRegexp(glob, false)
matcher, firstErr := filter.GlobPathToRegexp(glob, false)
if firstErr != nil {
return files, fmt.Errorf("invalid glob %q", glob)
}
@@ -1742,8 +1742,8 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
b.path2, "{path2/}",
b.replaceHex(b.path1), "{path1/}",
b.replaceHex(b.path2), "{path2/}",
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path1, slash, "/"), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path2, slash, "/"), "/"), "{path2}",
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
strings.TrimSuffix(b.path2, slash), "{path2}",
b.workDir, "{workdir}",

@@ -107,10 +107,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
}
if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Debugf(src, err.Error())
fs.Debugf(src, "%s", err.Error())
// using same error msg as CheckFn so integration tests match
err = fmt.Errorf("%v differ", hashType)
fs.Errorf(src, err.Error())
fs.Errorf(src, "%s", err.Error())
return true, false, nil
}
return false, false, nil

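These Debugf/Errorf changes fix go vet's printf check: a message passed as the format string is misformatted if it contains '%'. A minimal reproduction:

    package main

    import "fmt"

    func main() {
        msg := "disk 100% full"
        fmt.Printf(msg)         // vet flags this; prints "disk 100%!f(MISSING)ull"
        fmt.Printf("%s\n", msg) // correct: disk 100% full
    }
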
@@ -62,42 +62,41 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
b.setHashType(ci)
}

// Checks and Warnings
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) //nolint:govet
ci.CheckSum = false
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
} else if b.opt.Compare.Checksum && !ci.CheckSum {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
}
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
}
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
b.opt.Compare.Modtime = true
b.opt.Compare.Size = true
ci.CheckSum = false
b.opt.Compare.Checksum = false
} else {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
// note: --checksum will still affect the internal sync calls
}
}
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
b.opt.IgnoreListingChecksum = true
}
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
}

notSupported := func(label string, value bool, opt *bool) {
if value {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
*opt = false
}
}
@@ -124,13 +123,13 @@ func sizeDiffers(a, b int64) bool {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
}
return false
}
if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
return false
}
}
@@ -152,7 +151,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
return
}
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
b.opt.Compare.SlowHashSyncOnly = false
b.opt.Compare.NoSlowHash = true
ci.CheckSum = false
@@ -160,7 +159,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}

if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
b.opt.Compare.Modtime = true
@@ -168,25 +167,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
ci.CheckSum = false
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
if b.opt.Compare.HashType1 != hash.None {
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
}
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
if b.opt.Compare.HashType2 != hash.None {
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
}
}
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
b.opt.Compare.Checksum = false
ci.CheckSum = false
b.opt.IgnoreListingChecksum = true
@@ -233,7 +232,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
b.opt.Compare.Checksum = true
CompareFlag.Checksum = true
default:
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
}
}

@@ -285,14 +284,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
}
if o.Size() < 0 {
downloadHashWarn.Do(func() {
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}

firstDownloadHash.Do(func() {
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
defer func() {

@@ -190,51 +190,49 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
ds.deleted++
d |= deltaDeleted
} else {
} else if !now.isDir(file) {
// skip dirs here, as we only care if they are new/deleted, not newer/older
if !now.isDir(file) {
whatchanged := []string{}
if b.opt.Compare.Size {
if sizeDiffers(old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
if now.getSize(file) > old.getSize(file) {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
d |= deltaLarger
} else {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
d |= deltaSmaller
}
s = now.getSize(file)
whatchanged := []string{}
if b.opt.Compare.Size {
if sizeDiffers(old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
if now.getSize(file) > old.getSize(file) {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
d |= deltaLarger
} else {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
d |= deltaSmaller
}
s = now.getSize(file)
}
if b.opt.Compare.Modtime {
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
d |= deltaOlder
}
t = now.getTime(file)
}
if b.opt.Compare.Modtime {
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
d |= deltaOlder
}
t = now.getTime(file)
}
}
if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
h = now.getHash(file)
}
}
// concat changes and print log
if d.is(deltaModified) {
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
b.indent(msg, file, summary)
}
if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
h = now.getHash(file)
}
}
// concat changes and print log
if d.is(deltaModified) {
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
b.indent(msg, file, summary)
}
}

if d.is(deltaModified) {

@@ -39,8 +39,8 @@ func (b *bisyncRun) indent(tag, file, msg string) {
tag = Color(terminal.BlueFg, tag)
}
msg = Color(terminal.MagentaFg, msg)
msg = strings.Replace(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"), -1)
msg = strings.Replace(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"), -1)
msg = strings.ReplaceAll(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"))
msg = strings.ReplaceAll(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"))
file = Color(terminal.CyanFg, escapePath(file, false))
logf(nil, "- %-18s%-43s - %s", tag, msg, file)
}

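strings.ReplaceAll, used throughout these hunks, was added in Go 1.12 as shorthand for strings.Replace with n = -1; the two spellings are exactly equivalent:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s := "a|b|c"
        fmt.Println(strings.Replace(s, "|", "`", -1) == strings.ReplaceAll(s, "|", "`")) // true
    }
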
@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
finaliseOnce.Do(func() {
if atexit.Signalled() {
if b.opt.Resync {
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
} else {
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
b.InGracefulShutdown = true
if b.SyncCI != nil {
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
b.SyncCI.MaxTransfer = 1
b.SyncCI.MaxDuration = 1 * time.Second
b.SyncCI.CutoffMode = fs.CutoffModeSoft
gracePeriod := 30 * time.Second // TODO: flag to customize this?
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
b.CancelSync()
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
}
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// we haven't started to sync yet, so we're good.
// no need to worry about the listing files, as we haven't overwritten them yet.
b.CleanupCompleted = true
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
}
}
if !b.CleanupCompleted {
if !b.opt.Resync {
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
}
markFailed(b.listing1)
markFailed(b.listing2)
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
b.critical = false
}
if err == nil {
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
}
}

if b.critical {
if b.retryable && b.opt.Resilient {
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
} else {
if bilib.FileExists(b.listing1) {
_ = os.Rename(b.listing1, b.listing1+"-err")
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
_ = os.Rename(b.listing2, b.listing2+"-err")
}
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
}
return ErrBisyncAborted
}
if b.abort && !b.InGracefulShutdown {
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
}
if err == nil {
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
}
return err
}
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
if opt.CheckSync != CheckSyncFalse {
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
b.retryable = true
return err
}
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid."))
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
}
b.revertToOldListings()
} else {
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
fs.Infof(nil, "Building Path1 and Path2 listings")
ls1, ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() {
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
b.critical = true
b.retryable = true
return err
@@ -476,10 +476,8 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
b.indent("ERROR", file, "Path1 file not found in Path2")
ok = false
} else {
if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
ok = false
}
} else if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
ok = false
}
}
for _, file := range files2.list {
@@ -569,7 +567,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont

func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
err = fmt.Errorf(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
return err
}
// need to test our BackupDirs too, as sync will be fooled by our --files-from filters
@@ -625,7 +623,7 @@ func (b *bisyncRun) checkSyntax() error {

func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
if b.DebugName != "" && b.DebugName == nametocheck {
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
}
}

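The errors.New switch above follows the same vet logic as the logging fixes: when the message contains no formatting verbs and there are no arguments, errors.New is the right constructor and cannot misread a '%' in the text, while fmt.Errorf remains correct when verbs or error wrapping are needed:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        msg := "quota 90% used"
        err1 := errors.New(msg)                      // safe for arbitrary text
        err2 := fmt.Errorf("open %q: %w", "f", err1) // Errorf for verbs/wrapping
        fmt.Println(err1, err2)
    }
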
@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
prettyprint(result, "writing result", fs.LogLevelDebug)
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
once.Do(func() {
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
})
}


@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
if winningPath > 0 {
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
} else {
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined."))
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
}
}


@@ -15,7 +15,7 @@ import (
// and either flag is sufficient without the other.
func (b *bisyncRun) setResyncDefaults() {
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
b.opt.ResyncMode = PreferPath1
}
if b.opt.ResyncMode != PreferNone {

@@ -20,8 +20,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cachestats source:",
Short: `Print cache stats for a remote`,
Long: `
Print cache stats for a remote in JSON format
Long: `Print cache stats for a remote in JSON format
`,
Hidden: true,
Annotations: map[string]string{

@@ -39,8 +39,7 @@ var commandDefinition = &cobra.Command{
Use: "cat remote:path",
Short: `Concatenates any files and sends them to stdout.`,
// Warning! "|" will be replaced by backticks below
Long: strings.ReplaceAll(`
rclone cat sends any files to standard output.
Long: strings.ReplaceAll(`Sends any files to standard output.

You can use it like this to output a single file


@@ -138,8 +138,7 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
var commandDefinition = &cobra.Command{
Use: "check source:path dest:path",
Short: `Checks the files in the source and destination match.`,
Long: strings.ReplaceAll(`
Checks the files in the source and destination match. It compares
Long: strings.ReplaceAll(`Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files that don't
match. It doesn't alter the source or destination.

@@ -26,8 +26,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "checksum <hash> sumfile dst:path",
Short: `Checks the files in the destination against a SUM file.`,
Long: strings.ReplaceAll(`
Checks that hashsums of destination files match the SUM file.
Long: strings.ReplaceAll(`Checks that hashsums of destination files match the SUM file.
It compares hashes (MD5, SHA1, etc) and logs a report of files which
don't match. It doesn't alter the file system.

@@ -16,8 +16,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible.`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
Long: `Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
Annotations: map[string]string{

63
cmd/cmd.go
@@ -14,7 +14,6 @@ import (
"os"
"os/exec"
"path"
"regexp"
"runtime"
"runtime/pprof"
"strconv"
@@ -29,11 +28,10 @@ import (
"github.com/rclone/rclone/fs/config/configflags"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/filter/filterflags"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fspath"
fslog "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/rcserver"
fssync "github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/lib/atexit"
@@ -50,7 +48,6 @@ var (
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file", "Debugging")
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file", "Debugging")
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)", "Logging")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second", "Logging")
version bool
// Errors
errorCommandNotFound = errors.New("command not found")
@@ -119,7 +116,7 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
if !fi.InActive() {
err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
err = fs.CountError(err)
log.Fatalf(err.Error())
log.Fatal(err.Error())
}
// Limit transfers to this file
err := fi.AddFile(fileName)
@@ -383,6 +380,12 @@ func StartStats() func() {

// initConfig is run by cobra after initialising the flags
func initConfig() {
// Set the global options from the flags
err := fs.GlobalOptionsInit()
if err != nil {
log.Fatalf("Failed to initialise global options: %v", err)
}

ctx := context.Background()
ci := fs.GetConfig(ctx)

@@ -409,12 +412,6 @@ func initConfig() {
terminal.EnableColorsStdout()
}

// Load filters
err := filterflags.Reload(ctx)
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}

// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)

@@ -424,7 +421,7 @@ func initConfig() {
}

// Start the remote control server if configured
_, err = rcserver.Start(context.Background(), &rcflags.Opt)
_, err = rcserver.Start(context.Background(), &rc.Opt)
if err != nil {
log.Fatalf("Failed to start remote control: %v", err)
}
@@ -473,13 +470,6 @@ func initConfig() {
}
})
}

if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); !m {
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
ci.DataRateUnit = "bytes"
} else {
ci.DataRateUnit = *dataRateUnit
}
}

func resolveExitCode(err error) {
@@ -522,41 +512,12 @@ var backendFlags map[string]struct{}
func AddBackendFlags() {
backendFlags = map[string]struct{}{}
for _, fsInfo := range fs.Registry {
done := map[string]struct{}{}
flags.AddFlagsFromOptions(pflag.CommandLine, fsInfo.Prefix, fsInfo.Options)
// Store the backend flag names for the help generator
for i := range fsInfo.Options {
opt := &fsInfo.Options[i]
// Skip if done already (e.g. with Provider options)
if _, doneAlready := done[opt.Name]; doneAlready {
continue
}
done[opt.Name] = struct{}{}
// Make a flag from each option
name := opt.FlagName(fsInfo.Prefix)
found := pflag.CommandLine.Lookup(name) != nil
if !found {
// Take first line of help only
help := strings.TrimSpace(opt.Help)
if nl := strings.IndexRune(help, '\n'); nl >= 0 {
help = help[:nl]
}
help = strings.TrimRight(strings.TrimSpace(help), ".!?")
if opt.IsPassword {
help += " (obscured)"
}
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
flags.SetDefaultFromEnv(pflag.CommandLine, name)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}
// Hide on the command line if requested
if opt.Hide&fs.OptionHideCommandLine != 0 {
flag.Hidden = true
}
backendFlags[name] = struct{}{}
} else {
fs.Errorf(nil, "Not adding duplicate flag --%s", name)
}
// flag.Hidden = true
backendFlags[name] = struct{}{}
}
}
}

@@ -52,7 +52,7 @@ func findOption(name string, options []string) (found bool) {
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
options = []string{
"-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
"-o", fmt.Sprintf("attr_timeout=%g", time.Duration(opt.AttrTimeout).Seconds()),
}
if opt.DebugFUSE {
options = append(options, "-o", "debug")
@@ -79,7 +79,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
// WinFSP so cmount must work with or without it.
options = append(options, "-o", "atomic_o_trunc")
if opt.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
}
if opt.AllowOther {
options = append(options, "-o", "allow_other")

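The added time.Duration conversions suggest these mountlib options changed from time.Duration to a named duration type (an assumption from the diff alone); converting back restores the time.Duration method set:

    package main

    import (
        "fmt"
        "time"
    )

    // Duration stands in for a named option type such as fs.Duration.
    type Duration time.Duration

    func main() {
        attr := Duration(5 * time.Second)
        fmt.Println(time.Duration(attr).Seconds()) // 5
    }
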
@@ -268,8 +268,7 @@ as a readable demonstration.
var configCreateCommand = &cobra.Command{
Use: "create name type [key value]*",
Short: `Create a new remote with name, type and options.`,
Long: strings.ReplaceAll(`
Create a new remote of |name| with |type| and options. The options
Long: strings.ReplaceAll(`Create a new remote of |name| with |type| and options. The options
should be passed in pairs of |key| |value| or as |key=value|.

For example, to make a swift remote of name myremote using auto config
@@ -334,8 +333,7 @@ func init() {
var configUpdateCommand = &cobra.Command{
Use: "update name [key value]+",
Short: `Update options in an existing remote.`,
Long: strings.ReplaceAll(`
Update an existing remote's options. The options should be passed in
Long: strings.ReplaceAll(`Update an existing remote's options. The options should be passed in
pairs of |key| |value| or as |key=value|.

For example, to update the env_auth field of a remote of name myremote
@@ -379,8 +377,7 @@ var configDeleteCommand = &cobra.Command{
var configPasswordCommand = &cobra.Command{
Use: "password name [key value]+",
Short: `Update password in an existing remote.`,
Long: strings.ReplaceAll(`
Update an existing remote's password. The password
Long: strings.ReplaceAll(`Update an existing remote's password. The password
should be passed in pairs of |key| |password| or as |key=password|.
The |password| should be passed in in clear (unobscured).

@@ -435,8 +432,7 @@ func argsToMap(args []string) (out rc.Params, err error) {
var configReconnectCommand = &cobra.Command{
Use: "reconnect remote:",
Short: `Re-authenticates user with remote.`,
Long: `
This reconnects remote: passed in to the cloud storage system.
Long: `This reconnects remote: passed in to the cloud storage system.

To disconnect the remote use "rclone config disconnect".

@@ -456,8 +452,7 @@ This normally means going through the interactive oauth flow again.
var configDisconnectCommand = &cobra.Command{
Use: "disconnect remote:",
Short: `Disconnects user from remote`,
Long: `
This disconnects the remote: passed in to the cloud storage system.
Long: `This disconnects the remote: passed in to the cloud storage system.

This normally means revoking the oauth token.

@@ -489,8 +484,7 @@ func init() {
var configUserInfoCommand = &cobra.Command{
Use: "userinfo remote:",
Short: `Prints info about logged in user of remote.`,
Long: `
This prints the details of the person logged in to the cloud storage
Long: `This prints the details of the person logged in to the cloud storage
system.
`,
RunE: func(command *cobra.Command, args []string) error {

@@ -26,8 +26,7 @@ var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping identical files.`,
// Note: "|" will be replaced by backticks below
Long: strings.ReplaceAll(`
Copy the source to the destination. Does not transfer files that are
Long: strings.ReplaceAll(`Copy the source to the destination. Does not transfer files that are
identical on source and destination, testing by size and modification
time or MD5SUM. Doesn't delete files from the destination. If you
want to also delete files from destination, to make it match source,

@@ -17,8 +17,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping identical files.`,
Long: `
If source:path is a file or directory then it copies it to a file or
Long: `If source:path is a file or directory then it copies it to a file or
directory named dest:path.

This can be used to upload single files to other than their current

@@ -36,8 +36,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copyurl https://example.com dest:path",
Short: `Copy the contents of the URL supplied content to dest:path.`,
Long: strings.ReplaceAll(`
Download a URL's content and copy it to the destination without saving
Long: strings.ReplaceAll(`Download a URL's content and copy it to the destination without saving
it in temporary storage.

Setting |--auto-filename| will attempt to automatically determine the

@@ -23,10 +23,9 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of an encrypted remote.`,
Long: `
rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
This is the equivalent of running rclone [check](/commands/rclone_check/),
but able to check the checksums of the encrypted remote.
Long: `Checks a remote against a [crypted](/crypt/) remote. This is the equivalent
of running rclone [check](/commands/rclone_check/), but able to check the
checksums of the encrypted remote.

For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
@@ -104,7 +103,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Errorf(src, err.Error())
fs.Errorf(src, "%s", err.Error())
return true, false, nil
}
return false, false, nil

@@ -26,9 +26,8 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cryptdecode encryptedremote: encryptedfilename",
Short: `Cryptdecode returns unencrypted file names.`,
Long: `
rclone cryptdecode returns unencrypted file names when provided with
a list of encrypted file names. List limit is 10 items.
Long: `Returns unencrypted file names when provided with a list of encrypted file
names. List limit is 10 items.

If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file names.


@@ -27,9 +27,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate filenames and delete/rename them.`,
Long: `

By default ` + "`dedupe`" + ` interactively finds files with duplicate
Long: `By default ` + "`dedupe`" + ` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different. This is known as deduping by name.


@@ -25,8 +25,7 @@ var commandDefinition = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the files in path.`,
// Warning! "|" will be replaced by backticks below
Long: strings.ReplaceAll(`
Remove the files in path. Unlike [purge](/commands/rclone_purge/) it
Long: strings.ReplaceAll(`Remove the files in path. Unlike [purge](/commands/rclone_purge/) it
obeys include/exclude filters so can be used to selectively delete files.

|rclone delete| only deletes files but leaves the directory structure

@@ -18,8 +18,7 @@ func init() {
 var commandDefinition = &cobra.Command{
     Use:   "deletefile remote:path",
     Short: `Remove a single file from remote.`,
-    Long: `
-Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
+    Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
 remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
 it will always be removed.
 `,

@@ -1,4 +1,4 @@
-// Package genautocomplete provides the genautocomplete command.
+// Package genautocomplete provides the completion command.
 package genautocomplete
 
 import (

@@ -13,8 +13,7 @@ func init() {
 var completionDefinition = &cobra.Command{
     Use:   "completion [shell]",
     Short: `Output completion script for a given shell.`,
-    Long: `
-Generates a shell completion script for rclone.
+    Long: `Generates a shell completion script for rclone.
 Run with ` + "`--help`" + ` to list the supported shells.
 `,
     Annotations: map[string]string{

@@ -15,12 +15,11 @@ func init() {
 var bashCommandDefinition = &cobra.Command{
     Use:   "bash [output_file]",
     Short: `Output bash completion script for rclone.`,
-    Long: `
-Generates a bash shell autocompletion script for rclone.
+    Long: `Generates a bash shell autocompletion script for rclone.
 
 By default, when run without any arguments,
 
-    rclone genautocomplete bash
+    rclone completion bash
 
 the generated script will be written to
 

@@ -15,13 +15,12 @@ func init() {
 var fishCommandDefinition = &cobra.Command{
     Use:   "fish [output_file]",
     Short: `Output fish completion script for rclone.`,
-    Long: `
-Generates a fish autocompletion script for rclone.
+    Long: `Generates a fish autocompletion script for rclone.
 
 This writes to /etc/fish/completions/rclone.fish by default so will
 probably need to be run with sudo or as root, e.g.
 
-    sudo rclone genautocomplete fish
+    sudo rclone completion fish
 
 Logout and login again to use the autocompletion scripts, or source
 them directly

@@ -15,8 +15,7 @@ func init() {
 var powershellCommandDefinition = &cobra.Command{
     Use:   "powershell [output_file]",
     Short: `Output powershell completion script for rclone.`,
-    Long: `
-Generate the autocompletion script for powershell.
+    Long: `Generate the autocompletion script for powershell.
 
 To load completions in your current shell session:
 

@@ -15,13 +15,12 @@ func init() {
 var zshCommandDefinition = &cobra.Command{
     Use:   "zsh [output_file]",
     Short: `Output zsh completion script for rclone.`,
-    Long: `
-Generates a zsh autocompletion script for rclone.
+    Long: `Generates a zsh autocompletion script for rclone.
 
 This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
 probably need to be run with sudo or as root, e.g.
 
-    sudo rclone genautocomplete zsh
+    sudo rclone completion zsh
 
 Logout and login again to use the autocompletion scripts, or source
 them directly

@@ -30,12 +30,19 @@ type frontmatter struct {
     Title       string
     Description string
     Source      string
+    Aliases     []string
     Annotations map[string]string
 }
 
 var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
 title: "{{ .Title }}"
 description: "{{ .Description }}"
+{{- if .Aliases }}
+aliases:
+{{- range $value := .Aliases }}
+  - {{ $value }}
+{{- end }}
+{{- end }}
 {{- range $key, $value := .Annotations }}
 {{ $key }}: {{ $value }}
 {{- end }}

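The new `{{- if .Aliases }}` block emits an `aliases:` list only when a page has any, so redirect frontmatter is generated without touching pages that need none. A runnable sketch of the same template shape (hypothetical data, trimmed to the two fields that matter here):

    package main

    import (
    	"os"
    	"text/template"
    )

    type frontmatter struct {
    	Title   string
    	Aliases []string
    }

    // Same shape as the template above, built from quoted strings.
    var tmpl = template.Must(template.New("fm").Parse(
    	"---\n" +
    		"title: \"{{ .Title }}\"\n" +
    		"{{- if .Aliases }}\naliases:\n" +
    		"{{- range $value := .Aliases }}\n  - {{ $value }}\n" +
    		"{{- end }}\n{{- end }}\n---\n"))

    func main() {
    	// With aliases present the block renders; with an empty slice
    	// the whole block disappears from the output.
    	_ = tmpl.Execute(os.Stdout, frontmatter{
    		Title:   "rclone completion bash",
    		Aliases: []string{"/commands/rclone_genautocomplete_bash/"},
    	})
    }
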
@@ -46,8 +53,7 @@ description: "{{ .Description }}"
 var commandDefinition = &cobra.Command{
     Use:   "gendocs output_directory",
     Short: `Output markdown docs for rclone to the directory supplied.`,
-    Long: `
-This produces markdown docs for the rclone commands to the directory
+    Long: `This produces markdown docs for the rclone commands to the directory
 supplied. These are in a format suitable for hugo to render into the
 rclone.org website.`,
     Annotations: map[string]string{

@@ -82,23 +88,37 @@ rclone.org website.`,
     // Look up name => details for prepender
     type commandDetails struct {
         Short       string
+        Aliases     []string
         Annotations map[string]string
     }
     var commands = map[string]commandDetails{}
-    var aliases []string
-    var addCommandDetails func(root *cobra.Command)
-    addCommandDetails = func(root *cobra.Command) {
+    var addCommandDetails func(root *cobra.Command, parentAliases []string)
+    addCommandDetails = func(root *cobra.Command, parentAliases []string) {
         name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
+        var aliases []string
+        for _, p := range parentAliases {
+            aliases = append(aliases, p+" "+root.Name())
+            for _, v := range root.Aliases {
+                aliases = append(aliases, p+" "+v)
+            }
+        }
+        for _, v := range root.Aliases {
+            if root.HasParent() {
+                aliases = append(aliases, root.Parent().CommandPath()+" "+v)
+            } else {
+                aliases = append(aliases, v)
+            }
+        }
         commands[name] = commandDetails{
             Short:       root.Short,
+            Aliases:     aliases,
             Annotations: root.Annotations,
         }
-        aliases = append(aliases, root.Aliases...)
         for _, c := range root.Commands() {
-            addCommandDetails(c)
+            addCommandDetails(c, aliases)
         }
     }
-    addCommandDetails(cmd.Root)
+    addCommandDetails(cmd.Root, []string{})
 
     // markup for the docs files
     prepender := func(filename string) string {

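The recursion now threads every alias of a parent down to its children, so a page like `rclone_completion_bash.md` also claims `rclone genautocomplete bash`. A toy mirror of the logic (simplified types, not cobra) to make the expansion concrete:

    package main

    import "fmt"

    type cmdNode struct {
    	path     string // e.g. "rclone completion bash"
    	name     string
    	aliases  []string // fully qualified in this toy version
    	children []*cmdNode
    }

    func collect(c *cmdNode, parentAliases []string, out map[string][]string) {
    	var aliases []string
    	// Each parent alias spawns an alias for this command (and for
    	// each of this command's own aliases).
    	for _, p := range parentAliases {
    		aliases = append(aliases, p+" "+c.name)
    		for _, v := range c.aliases {
    			aliases = append(aliases, p+" "+v)
    		}
    	}
    	aliases = append(aliases, c.aliases...)
    	out[c.path] = aliases
    	for _, child := range c.children {
    		collect(child, aliases, out)
    	}
    }

    func main() {
    	bash := &cmdNode{path: "rclone completion bash", name: "bash"}
    	completion := &cmdNode{
    		path:     "rclone completion",
    		name:     "completion",
    		aliases:  []string{"rclone genautocomplete"},
    		children: []*cmdNode{bash},
    	}
    	out := map[string][]string{}
    	collect(completion, nil, out)
    	fmt.Println(out["rclone completion bash"]) // [rclone genautocomplete bash]
    }
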
@@ -109,8 +129,12 @@ rclone.org website.`,
             Title:       strings.ReplaceAll(base, "_", " "),
             Description: commands[name].Short,
             Source:      strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
+            Aliases:     []string{},
             Annotations: map[string]string{},
         }
+        for _, v := range commands[name].Aliases {
+            data.Aliases = append(data.Aliases, "/commands/"+strings.ReplaceAll(v, " ", "_")+"/")
+        }
         // Filter out annotations that confuse hugo from the frontmatter
         for k, v := range commands[name].Annotations {
             if k != "groups" {

@@ -145,12 +169,6 @@ rclone.org website.`,
         name := filepath.Base(path)
         cmd, ok := commands[name]
         if !ok {
-            // Avoid man pages which are for aliases. This is a bit messy!
-            for _, alias := range aliases {
-                if strings.Contains(name, alias) {
-                    return nil
-                }
-            }
             return fmt.Errorf("didn't find command for %q", name)
         }
         b, err := os.ReadFile(path)

@@ -159,33 +177,36 @@ rclone.org website.`,
         }
         doc := string(b)
 
-        var out strings.Builder
-        if groupsString := cmd.Annotations["groups"]; groupsString != "" {
-            groups := flags.All.Include(groupsString)
-            for _, group := range groups.Groups {
-                if group.Flags.HasFlags() {
-                    _, _ = fmt.Fprintf(&out, "\n### %s Options\n\n", group.Name)
-                    _, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
-                    _, _ = fmt.Fprintln(&out, "```")
-                    _, _ = out.WriteString(group.Flags.FlagUsages())
-                    _, _ = fmt.Fprintln(&out, "```")
-                }
-            }
-        }
-        _, _ = out.WriteString(`
-See the [global flags page](/flags/) for global options not listed here.
-
-`)
-
         startCut := strings.Index(doc, `### Options inherited from parent commands`)
-        endCut := strings.Index(doc, `## SEE ALSO`)
+        endCut := strings.Index(doc, `### SEE ALSO`)
         if startCut < 0 || endCut < 0 {
-            if name == "rclone.md" {
-                return nil
+            if name != "rclone.md" {
+                return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
             }
-            return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
+            if endCut >= 0 {
+                doc = doc[:endCut] + "### See Also" + doc[endCut+12:]
+            }
+        } else {
+            var out strings.Builder
+            if groupsString := cmd.Annotations["groups"]; groupsString != "" {
+                _, _ = out.WriteString("Options shared with other commands are described next.\n")
+                _, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
+                groups := flags.All.Include(groupsString)
+                for _, group := range groups.Groups {
+                    if group.Flags.HasFlags() {
+                        _, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
+                        _, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
+                        _, _ = out.WriteString("```\n")
+                        _, _ = out.WriteString(group.Flags.FlagUsages())
+                        _, _ = out.WriteString("```\n\n")
+                    }
+                }
+            } else {
+                _, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
+            }
+            doc = doc[:startCut] + out.String() + "### See Also" + doc[endCut+12:]
         }
-        doc = doc[:startCut] + out.String() + doc[endCut:]
 
         // outdent all the titles by one
         doc = outdentTitle.ReplaceAllString(doc, `$1`)
         err = os.WriteFile(path, []byte(doc), 0777)

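The magic `12` in `doc[endCut+12:]` is `len("### SEE ALSO")`: the slice drops cobra's generated heading and substitutes the site's `### See Also`. A standalone sketch of that cut:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	doc := "...body...\n### SEE ALSO\n* rclone copy\n"
    	const seeAlso = "### SEE ALSO" // 12 bytes, hence the +12 above
    	if endCut := strings.Index(doc, seeAlso); endCut >= 0 {
    		doc = doc[:endCut] + "### See Also" + doc[endCut+len(seeAlso):]
    	}
    	fmt.Print(doc) // ...body... / ### See Also / * rclone copy
    }
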
@@ -157,7 +157,7 @@ type server struct {
 }
 
 func (s *server) sendMsg(msg string) {
-    msg = msg + "\n"
+    msg += "\n"
     if _, err := io.WriteString(s.writer, msg); err != nil {
         panic(err)
     }

@@ -84,8 +84,7 @@ func CreateFromStdinArg(ht hash.Type, args []string, startArg int) (bool, error)
 var commandDefinition = &cobra.Command{
     Use:   "hashsum [<hash> remote:path]",
     Short: `Produces a hashsum file for all the objects in the path.`,
-    Long: `
-Produces a hash file for all the objects in the path using the hash
+    Long: `Produces a hash file for all the objects in the path using the hash
 named. The output is in the same format as the standard
 md5sum/sha1sum tool.
 

76 cmd/help.go
@@ -12,6 +12,7 @@ import (
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/configflags"
     "github.com/rclone/rclone/fs/config/flags"
+    "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/fs/filter/filterflags"
     "github.com/rclone/rclone/fs/log/logflags"
     "github.com/rclone/rclone/fs/rc/rcflags"

@@ -26,8 +27,7 @@ import (
 var Root = &cobra.Command{
     Use:   "rclone",
     Short: "Show help for rclone commands, flags and backends.",
-    Long: `
-Rclone syncs files to and from cloud storage providers as well as
+    Long: `Rclone syncs files to and from cloud storage providers as well as
 mounting them, listing them in lots of different ways.
 
 See the home page (https://rclone.org/) for installation, usage,

@@ -58,23 +58,34 @@ var helpCommand = &cobra.Command{
 }
 
 // to filter the flags with
-var flagsRe *regexp.Regexp
+var (
+    filterFlagsGroup     string
+    filterFlagsRe        *regexp.Regexp
+    filterFlagsNamesOnly bool
+)
 
 // Show the flags
 var helpFlags = &cobra.Command{
-    Use:   "flags [<regexp to match>]",
+    Use:   "flags [<filter>]",
     Short: "Show the global flags for rclone",
     Run: func(command *cobra.Command, args []string) {
-        if len(args) > 0 {
-            re, err := regexp.Compile(`(?i)` + args[0])
-            if err != nil {
-                log.Fatalf("Failed to compile flags regexp: %v", err)
-            }
-            flagsRe = re
-        }
+        command.Flags()
         if GeneratingDocs {
             Root.SetUsageTemplate(docFlagsTemplate)
         } else {
+            if len(args) > 0 {
+                re, err := filter.GlobStringToRegexp(args[0], false, true)
+                if err != nil {
+                    log.Fatalf("Invalid flag filter: %v", err)
+                }
+                fs.Debugf(nil, "Flag filter: %s", re.String())
+                filterFlagsRe = re
+            }
+            if filterFlagsGroup != "" {
+                Root.SetUsageTemplate(filterFlagsSingleGroupTemplate)
+            } else if len(args) > 0 {
+                Root.SetUsageTemplate(filterFlagsMultiGroupTemplate)
+            }
             Root.SetOutput(os.Stdout)
         }
         _ = command.Usage()

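`rclone help flags` now interprets its argument as an rclone filter pattern via `filter.GlobStringToRegexp` (the exact meaning of its two boolean arguments is not spelled out in this diff), instead of compiling it directly as a regexp; the `--group` and `--name` flags registered in a later hunk narrow the match further. A simplified stand-in for the glob-to-regexp step, to show the idea:

    package main

    import (
    	"fmt"
    	"regexp"
    	"strings"
    )

    // globToRegexp is a toy version of filter.GlobStringToRegexp:
    // it only expands '*' and quotes everything else.
    func globToRegexp(glob string) (*regexp.Regexp, error) {
    	parts := strings.Split(glob, "*")
    	for i, p := range parts {
    		parts[i] = regexp.QuoteMeta(p)
    	}
    	return regexp.Compile("(?i)^" + strings.Join(parts, ".*") + "$")
    }

    func main() {
    	re, err := globToRegexp("*dir*")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(re.MatchString("backup-dir")) // true
    	fmt.Println(re.MatchString("max-depth"))  // false
    }
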
@@ -146,7 +157,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
     })
     cobra.AddTemplateFunc("flagGroups", func(cmd *cobra.Command) []*flags.Group {
         // Add the backend flags and check all flags
-        backendGroup := flags.All.NewGroup("Backend", "Backend only flags. These can be set in the config file also.")
+        backendGroup := flags.All.NewGroup("Backend", "Backend-only flags (these can be set in the config file also)")
         allRegistered := flags.All.AllRegistered()
         cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
             if _, ok := backendFlags[flag.Name]; ok {

@@ -157,7 +168,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
                 fs.Errorf(nil, "Flag --%s is unknown", flag.Name)
             }
         })
-        groups := flags.All.Filter(flagsRe).Include(cmd.Annotations["groups"])
+        groups := flags.All.Filter(filterFlagsGroup, filterFlagsRe, filterFlagsNamesOnly).Include(cmd.Annotations["groups"])
         return groups.Groups
     })
     rootCmd.SetUsageTemplate(usageTemplate)

@@ -169,6 +180,9 @@ func setupRootCommand(rootCmd *cobra.Command) {
 
     rootCmd.AddCommand(helpCommand)
     helpCommand.AddCommand(helpFlags)
+    helpFlagsFlags := helpFlags.Flags()
+    flags.StringVarP(helpFlagsFlags, &filterFlagsGroup, "group", "", "", "Only include flags from specific group", "")
+    flags.BoolVarP(helpFlagsFlags, &filterFlagsNamesOnly, "name", "", false, "Apply filter only on flag names", "")
     helpCommand.AddCommand(helpBackends)
     helpCommand.AddCommand(helpBackend)
 

@@ -201,20 +215,15 @@ Aliases:
 Examples:
 {{.Example}}{{end}}{{if and (showCommands .) .HasAvailableSubCommands}}
 
-Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
-  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if and (showLocalFlags .) .HasAvailableLocalFlags}}
+Available commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+  {{rpad .Name .NamePadding}} {{.Short}}{{end}}{{end}}{{end}}{{if and (showLocalFlags .) .HasAvailableLocalFlags}}
 
 Flags:
-{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}
-
-{{ range flagGroups . }}{{ if .Flags.HasFlags }}
-# {{ .Name }} Flags
-
-{{ .Help }}
-
-{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
-{{ end }}{{ end }}
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}{{range flagGroups .}}{{if .Flags.HasFlags}}
+
+{{.Help}} (flag group {{.Name}}):
+{{.Flags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{end}}{{if .HasHelpSubCommands}}
 
 Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
   {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}
 

@@ -223,6 +232,15 @@ Use "rclone help flags" for to see the global flags.
 Use "rclone help backends" for a list of supported services.
 `
 
+var filterFlagsSingleGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Flags.FlagUsages | trimTrailingWhitespaces}}
+{{end}}{{end}}
+`
+
+var filterFlagsMultiGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Help}} (flag group {{.Name}}):
+{{.Flags.FlagUsages | trimTrailingWhitespaces}}
+
+{{end}}{{end}}`
+
 var docFlagsTemplate = `---
 title: "Global Flags"
 description: "Rclone Global Flags"

@@ -233,16 +251,16 @@ description: "Rclone Global Flags"
 This describes the global flags available to every rclone command
 split into groups.
 
-{{ range flagGroups . }}{{ if .Flags.HasFlags }}
-## {{ .Name }}
+{{range flagGroups .}}{{if .Flags.HasFlags}}
+## {{.Name}}
 
-{{ .Help }}
+{{.Help}}.
 
 ` + "```" + `
-{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
+{{.Flags.FlagUsages | trimTrailingWhitespaces}}
 ` + "```" + `
 
-{{ end }}{{ end }}
+{{end}}{{end}}
 `
 
 // show all the backends

@@ -27,8 +27,7 @@ func init() {
 var commandDefinition = &cobra.Command{
     Use:   "link remote:path",
     Short: `Generate public link to file/folder.`,
-    Long: `rclone link will create, retrieve or remove a public link to the given
-file or folder.
+    Long: `Create, retrieve or remove a public link to the given file or folder.
 
     rclone link remote:path/to/file
     rclone link remote:path/to/folder/

@@ -2,60 +2,232 @@
 package ls
 
 import (
+    "encoding/json"
     "fmt"
+    "os"
+    "regexp"
     "sort"
+    "strings"
 
     "github.com/rclone/rclone/cmd"
+    "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config"
     "github.com/rclone/rclone/fs/config/flags"
+    "github.com/rclone/rclone/fs/filter"
     "github.com/spf13/cobra"
 )
 
 // Globals
 var (
-    listLong bool
+    listLong          bool
+    jsonOutput        bool
+    filterName        string
+    filterType        string
+    filterSource      string
+    filterDescription string
+    orderBy           string
 )
 
 func init() {
     cmd.Root.AddCommand(commandDefinition)
     cmdFlags := commandDefinition.Flags()
-    flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type and the description as well as names", "")
+    flags.BoolVarP(cmdFlags, &listLong, "long", "", false, "Show type and description in addition to name", "")
+    flags.StringVarP(cmdFlags, &filterName, "name", "", "", "Filter remotes by name", "")
+    flags.StringVarP(cmdFlags, &filterType, "type", "", "", "Filter remotes by type", "")
+    flags.StringVarP(cmdFlags, &filterSource, "source", "", "", "Filter remotes by source, e.g. 'file' or 'environment'", "")
+    flags.StringVarP(cmdFlags, &filterDescription, "description", "", "", "Filter remotes by description", "")
+    flags.StringVarP(cmdFlags, &orderBy, "order-by", "", "", "Instructions on how to order the result, e.g. 'type,name=descending'", "")
+    flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
 }
 
+// lessFn compares to remotes for order by
+type lessFn func(a, b config.Remote) bool
+
+// newLess returns a function for comparing remotes based on an order by string
+func newLess(orderBy string) (less lessFn, err error) {
+    if orderBy == "" {
+        return nil, nil
+    }
+    parts := strings.Split(strings.ToLower(orderBy), ",")
+    n := len(parts)
+    for i := n - 1; i >= 0; i-- {
+        fieldAndDirection := strings.SplitN(parts[i], "=", 2)
+
+        descending := false
+        if len(fieldAndDirection) > 1 {
+            switch fieldAndDirection[1] {
+            case "ascending", "asc":
+            case "descending", "desc":
+                descending = true
+            default:
+                return nil, fmt.Errorf("unknown --order-by direction %q", fieldAndDirection[1])
+            }
+        }
+
+        var field func(o config.Remote) string
+        switch fieldAndDirection[0] {
+        case "name":
+            field = func(o config.Remote) string {
+                return o.Name
+            }
+        case "type":
+            field = func(o config.Remote) string {
+                return o.Type
+            }
+        case "source":
+            field = func(o config.Remote) string {
+                return o.Source
+            }
+        case "description":
+            field = func(o config.Remote) string {
+                return o.Description
+            }
+        default:
+            return nil, fmt.Errorf("unknown --order-by field %q", fieldAndDirection[0])
+        }
+
+        var thisLess lessFn
+        if descending {
+            thisLess = func(a, b config.Remote) bool {
+                return field(a) > field(b)
+            }
+        } else {
+            thisLess = func(a, b config.Remote) bool {
+                return field(a) < field(b)
+            }
+        }
+
+        if i == n-1 {
+            less = thisLess
+        } else {
+            nextLess := less
+            less = func(a, b config.Remote) bool {
+                if field(a) == field(b) {
+                    return nextLess(a, b)
+                }
+                return thisLess(a, b)
+            }
+        }
+    }
+    return less, nil
+}
+
 var commandDefinition = &cobra.Command{
-    Use:   "listremotes",
+    Use:   "listremotes [<filter>]",
     Short: `List all the remotes in the config file and defined in environment variables.`,
     Long: `
-rclone listremotes lists all the available remotes from the config file.
+Lists all the available remotes from the config file, or the remotes matching
+an optional filter.
 
-When used with the ` + "`--long`" + ` flag it lists the types and the descriptions too.
+Prints the result in human-readable format by default, and as a simple list of
+remote names, or if used with flag ` + "`--long`" + ` a tabular format including
+the remote names, types and descriptions. Using flag ` + "`--json`" + ` produces
+machine-readable output instead, which always includes all attributes - including
+the source (file or environment).
+
+Result can be filtered by a filter argument which applies to all attributes,
+and/or filter flags specific for each attribute. The values must be specified
+according to regular rclone filtering pattern syntax.
 `,
     Annotations: map[string]string{
         "versionIntroduced": "v1.34",
     },
-    Run: func(command *cobra.Command, args []string) {
-        cmd.CheckArgs(0, 0, command, args)
-        remotes := config.FileSections()
-        sort.Strings(remotes)
-        maxlen := 1
-        maxlentype := 1
-        for _, remote := range remotes {
-            if len(remote) > maxlen {
-                maxlen = len(remote)
-            }
-            t := config.FileGet(remote, "type")
-            if len(t) > maxlentype {
-                maxlentype = len(t)
-            }
-        }
+    RunE: func(command *cobra.Command, args []string) error {
+        cmd.CheckArgs(0, 1, command, args)
+        var filterDefault string
+        if len(args) > 0 {
+            filterDefault = args[0]
+        }
+        filters := make(map[string]*regexp.Regexp)
+        for k, v := range map[string]string{
+            "all":         filterDefault,
+            "name":        filterName,
+            "type":        filterType,
+            "source":      filterSource,
+            "description": filterDescription,
+        } {
+            if v != "" {
+                filterRe, err := filter.GlobStringToRegexp(v, false, true)
+                if err != nil {
+                    return fmt.Errorf("invalid %s filter argument: %w", k, err)
+                }
+                fs.Debugf(nil, "Filter for %s: %s", k, filterRe.String())
+                filters[k] = filterRe
+            }
+        }
+        remotes := config.GetRemotes()
+        maxName := 0
+        maxType := 0
+        i := 0
         for _, remote := range remotes {
-            if listLong {
-                remoteType := config.FileGet(remote, "type")
-                description := config.FileGet(remote, "description")
-                fmt.Printf("%-*s %-*s %s\n", maxlen+1, remote+":", maxlentype+1, remoteType, description)
-            } else {
-                fmt.Printf("%s:\n", remote)
-            }
-        }
+            include := true
+            for k, v := range filters {
+                if k == "all" && !(v.MatchString(remote.Name) || v.MatchString(remote.Type) || v.MatchString(remote.Source) || v.MatchString(remote.Description)) {
+                    include = false
+                } else if k == "name" && !v.MatchString(remote.Name) {
+                    include = false
+                } else if k == "type" && !v.MatchString(remote.Type) {
+                    include = false
+                } else if k == "source" && !v.MatchString(remote.Source) {
+                    include = false
+                } else if k == "description" && !v.MatchString(remote.Description) {
+                    include = false
+                }
+            }
+            if include {
+                if len(remote.Name) > maxName {
+                    maxName = len(remote.Name)
+                }
+                if len(remote.Type) > maxType {
+                    maxType = len(remote.Type)
+                }
+                remotes[i] = remote
+                i++
+            }
+        }
+        remotes = remotes[:i]
+
+        less, err := newLess(orderBy)
+        if err != nil {
+            return err
+        }
+        if less != nil {
+            sliceLessFn := func(i, j int) bool {
+                return less(remotes[i], remotes[j])
+            }
+            sort.SliceStable(remotes, sliceLessFn)
+        }
+
+        if jsonOutput {
+            fmt.Println("[")
+            first := true
+            for _, remote := range remotes {
+                out, err := json.Marshal(remote)
+                if err != nil {
+                    return fmt.Errorf("failed to marshal remote object: %w", err)
+                }
+                if first {
+                    first = false
+                } else {
+                    fmt.Print(",\n")
+                }
+                _, err = os.Stdout.Write(out)
+                if err != nil {
+                    return fmt.Errorf("failed to write to output: %w", err)
+                }
+            }
+            if !first {
+                fmt.Println()
+            }
+            fmt.Println("]")
+        } else if listLong {
+            for _, remote := range remotes {
+                fmt.Printf("%-*s %-*s %s\n", maxName+1, remote.Name+":", maxType, remote.Type, remote.Description)
+            }
+        } else {
+            for _, remote := range remotes {
+                fmt.Printf("%s:\n", remote.Name)
+            }
+        }
+        return nil
     },
 }

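`newLess` above builds one comparator from a comma-separated key list by iterating the keys right to left, so the leftmost key ends up outermost and ties fall through to the next key. The equivalent hand-written comparison for `--order-by "type,name=descending"`, on a toy stand-in for `config.Remote`:

    package main

    import (
    	"fmt"
    	"sort"
    )

    type remote struct{ Name, Type string }

    func main() {
    	remotes := []remote{
    		{"gdrive2", "drive"},
    		{"s3-backup", "s3"},
    		{"gdrive1", "drive"},
    	}
    	// Type ascending first; on equal types, Name descending.
    	sort.SliceStable(remotes, func(i, j int) bool {
    		if remotes[i].Type != remotes[j].Type {
    			return remotes[i].Type < remotes[j].Type
    		}
    		return remotes[i].Name > remotes[j].Name
    	})
    	fmt.Println(remotes) // [{gdrive2 drive} {gdrive1 drive} {s3-backup s3}]
    }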