Compare commits


1 commit

Author SHA1 Message Date
Nick Craig-Wood
cc0e304251 vfs: fix open files disappearing from directory listings
In this commit

aaadb48d48 vfs: keep virtual directory status accurate and reduce deadlock potential

We reworked the virtual directory detection to use an atomic bool so
that we could run part of the cache forgetting only with a read lock.

Unfortunately this had a bug which meant that directories with virtual
items could be forgotten.

This commit changes the boolean into a count of virtual entries which
should be more accurate.

Fixes #8082
2024-10-01 15:51:05 +01:00
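
The fix described above replaces a single atomic flag with a per-directory count of virtual entries, so a directory only becomes eligible for forgetting once every virtual entry has been resolved. A minimal standalone sketch of the counting idea, using hypothetical names rather than the actual vfs types:

package main

import (
	"fmt"
	"sync/atomic"
)

// dir tracks how many virtual (not-yet-confirmed) entries it holds. A single
// bool can be cleared while other virtual entries still exist; a count cannot.
type dir struct {
	virtuals atomic.Int64
}

func (d *dir) addVirtual()    { d.virtuals.Add(1) }
func (d *dir) removeVirtual() { d.virtuals.Add(-1) }

// canForget reports whether the cached listing may safely be discarded.
func (d *dir) canForget() bool { return d.virtuals.Load() == 0 }

func main() {
	var d dir
	d.addVirtual()
	d.addVirtual()
	d.removeVirtual()
	fmt.Println(d.canForget()) // false: one virtual entry remains
}

A count also suits the read-lock optimisation the commit message mentions, since an atomic load needs no exclusive access.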
157 changed files with 9855 additions and 5918 deletions


@@ -17,11 +17,12 @@ on:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
@@ -123,8 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get update
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -217,7 +217,7 @@ jobs:
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
@@ -296,7 +296,7 @@ jobs:
run: govulncheck ./...
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest


@@ -66,7 +66,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -93,7 +92,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
@@ -111,7 +109,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)


@@ -26,7 +26,6 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"


@@ -46,6 +46,7 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
)
const (
@@ -64,10 +65,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
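
Several hunks in this comparison switch between rclone's internal oauthutil.Config, which keeps AuthURL and TokenURL as top-level fields, and the stock golang.org/x/oauth2 Config, which nests them in an oauth2.Endpoint. A minimal sketch of the x/oauth2 shape, with placeholder credentials and URLs rather than Box's real values:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	cfg := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		Scopes:       []string{"openid"},
		Endpoint: oauth2.Endpoint{ // the URLs live one level down here
			AuthURL:  "https://example.com/oauth2/authorize",
			TokenURL: "https://example.com/oauth2/token",
		},
		RedirectURL: "http://127.0.0.1:53682/",
	}
	// AuthCodeURL builds the browser URL for the authorization step.
	fmt.Println(cfg.AuthCodeURL("state-token"))
}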


@@ -80,10 +80,9 @@ const (
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauthutil.Config{
driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -121,7 +120,6 @@ var (
"text/html": ".html",
"text/plain": ".txt",
"text/tab-separated-values": ".tsv",
"text/markdown": ".md",
}
_mimeTypeToExtensionLinks = map[string]string{
"application/x-link-desktop": ".desktop",
@@ -3560,8 +3558,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return nil
}
// Run the drive query calling fn on each entry found
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
list := f.svc.Files.List()
if query != "" {
list.Q(query)
@@ -3580,7 +3577,10 @@ func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (e
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
var results []*drive.File
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
@@ -3588,66 +3588,20 @@ func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (e
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
return nil, fmt.Errorf("failed to execute query: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
for _, item := range files.Files {
fn(item)
}
results = append(results, files.Files...)
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return nil
}
// Run the drive query returning the entries found
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
var results []*drive.File
err = f.queryFn(ctx, query, func(item *drive.File) {
results = append(results, item)
})
if err != nil {
return nil, err
}
return results, nil
}
// Rescue, list or delete orphaned files
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
if len(item.Parents) != 0 {
return
}
// Have found an orphaned entry
if delete {
fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
err = f.delete(ctx, item.Id, true)
if err != nil {
fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
}
} else if dirID == "" {
operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
} else {
fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(item.Id, nil).
AddParents(dirID).
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
}
}
})
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3839,37 +3793,6 @@ The result is a JSON array of matches, for example:
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
}
]`,
}, {
Name: "rescue",
Short: "Rescue or delete any orphaned files",
Long: `This command rescues or deletes any orphaned files or directories.
Sometimes files can get orphaned in Google Drive. This means that they
are no longer in any folder in Google Drive.
This command finds those files and either rescues them to a directory
you specify or deletes them.
Usage:
This can be used in 3 ways.
First, list all orphaned files
rclone backend rescue drive:
Second, rescue all orphaned files to the directory indicated
rclone backend rescue drive: "relative/path/to/rescue/directory"
e.g. To rescue all orphans to a directory called "Orphans" in the top level
rclone backend rescue drive: Orphans
Third, delete all orphaned files to the trash
rclone backend rescue drive: -o delete
`,
}}
// Command the backend to run a named command
@@ -3998,22 +3921,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} else {
return nil, errors.New("need a query argument")
}
case "rescue":
dirID := ""
_, delete := opt["delete"]
if len(arg) == 0 {
// no arguments - list only
} else if !delete && len(arg) == 1 {
dir := arg[0]
dirID, err = f.dirCache.FindDir(ctx, dir, true)
if err != nil {
return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
}
fs.Infof(f, "Rescuing orphans into %q", dir)
} else {
return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
}
return nil, f.rescue(ctx, dirID, delete)
default:
return nil, fs.ErrorCommandNotFound
}


@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
wantErr error
}{
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
} {


@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -94,7 +93,7 @@ const (
var (
// Description of how to auth for this app
dropboxConfig = &oauthutil.Config{
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
@@ -109,8 +108,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -135,7 +133,7 @@ var (
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
@@ -1022,20 +1020,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Temporary Object under construction
dstObj := &Object{
fs: f,
@@ -1049,6 +1040,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
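
The dropbox Copy hunks above lean on Go's named-return plus deferred-cleanup idiom: one side declares Copy with named results (dst fs.Object, err error) so that the deferred cleanup(&err) returned by operations.RemoveExisting can inspect, at return time, whether the copy succeeded before removing the pre-existing object. A self-contained sketch of that idiom, with hypothetical names rather than the dropbox backend's actual code:

package main

import (
	"errors"
	"fmt"
)

// cleanup receives a pointer to the caller's named error so the deferred
// call can choose between commit and rollback after the function body runs.
func cleanup(errp *error) {
	if *errp != nil {
		fmt.Println("error, keeping existing object:", *errp)
		return
	}
	fmt.Println("success, removing existing object")
}

func copyObject(fail bool) (err error) { // named result: the defer can read it
	defer cleanup(&err)
	if fail {
		return errors.New("copy failed")
	}
	return nil
}

func main() {
	_ = copyObject(false)
	_ = copyObject(true)
}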


@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -1228,19 +1228,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
}
// Find existing object
existingObj, err := f.NewObject(ctx, remote)
if err == nil {
defer func() {
// Don't remove existing object if returning an error
if err != nil {
return
}
fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
err = existingObj.Remove(ctx)
}()
}
// Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil {


@@ -60,17 +60,14 @@ const (
minSleep = 10 * time.Millisecond
)
var (
// Description of how to auth for this app
storageConfig = &oauthutil.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Description of how to auth for this app
var storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Register with Fs
func init() {


@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
@@ -59,14 +60,13 @@ const (
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,


@@ -31,6 +31,7 @@ import (
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -47,9 +48,11 @@ const (
// Globals
var (
// Description of how to auth for this app.
oauthConfig = &oauthutil.Config{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,


@@ -1,166 +0,0 @@
// Package api provides functionality for interacting with the iCloud API.
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
const (
baseEndpoint = "https://www.icloud.com"
homeEndpoint = "https://www.icloud.com"
setupEndpoint = "https://setup.icloud.com/setup/ws/1"
authEndpoint = "https://idmsa.apple.com/appleauth/auth"
)
type sessionSave func(*Session)
// Client defines the client configuration
type Client struct {
appleID string
password string
srv *rest.Client
Session *Session
sessionSaveCallback sessionSave
drive *DriveService
}
// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
// - appleID: the Apple ID of the user.
// - password: the password of the user.
// - trustToken: the trust token for the session.
// - clientID: the client id for the session.
// - cookies: the cookies for the session.
// - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
icloud := &Client{
appleID: appleID,
password: password,
srv: rest.NewClient(fshttp.NewClient(context.Background())),
Session: NewSession(),
sessionSaveCallback: sessionSaveCallback,
}
icloud.Session.TrustToken = trustToken
icloud.Session.Cookies = cookies
icloud.Session.ClientID = clientID
return icloud, nil
}
// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
var err error
if c.drive == nil {
c.drive, err = NewDriveService(c)
if err != nil {
return nil, err
}
}
return c.drive, nil
}
// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
resp, err = c.Session.Request(ctx, opts, request, response)
if err != nil && resp != nil {
// try to reauth
if resp.StatusCode == 401 || resp.StatusCode == 421 {
err = c.Authenticate(ctx)
if err != nil {
return nil, err
}
if c.Session.Requires2FA() {
return nil, errors.New("trust token expired, please reauth")
}
return c.RequestNoReAuth(ctx, opts, request, response)
}
}
return resp, err
}
// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
// Make the request without re-authenticating
resp, err = c.Session.Request(ctx, opts, request, response)
return resp, err
}
// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
if c.Session.Cookies != nil {
if err := c.Session.ValidateSession(ctx); err == nil {
fs.Debugf("icloud", "Valid session, no need to reauth")
return nil
}
c.Session.Cookies = nil
}
fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
err := c.Session.SignIn(ctx, c.appleID, c.password)
if err == nil {
err = c.Session.AuthWithToken(ctx)
if err == nil && c.sessionSaveCallback != nil {
c.sessionSaveCallback(c.Session)
}
}
return err
}
// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
return c.Session.SignIn(ctx, c.appleID, c.password)
}
// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
m, err := json.Marshal(values)
if err != nil {
return nil, err
}
return bytes.NewReader(m), nil
}
// RequestError holds info on a result state; icloud can return a 200 but the result is unknown
type RequestError struct {
Status string
Text string
}
// Error satisfies the error interface.
func (e *RequestError) Error() string {
return fmt.Sprintf("%s: %s", e.Text, e.Status)
}
func newRequestError(Status string, Text string) *RequestError {
return &RequestError{
Status: strings.ToLower(Status),
Text: Text,
}
}
// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}


@@ -1,913 +0,0 @@
package api
import (
"bytes"
"context"
"io"
"mime"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
const (
defaultZone = "com.apple.CloudDocs"
statusOk = "OK"
statusEtagConflict = "ETAG_CONFLICT"
)
// DriveService represents an iCloud Drive service.
type DriveService struct {
icloud *Client
RootID string
endpoint string
docsEndpoint string
}
// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}
// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
if err != nil {
return nil, resp, err
}
return items[0], resp, err
}
// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
var err error
_items := []map[string]any{}
for _, id := range ids {
_items = append(_items, map[string]any{
"drivewsid": id,
"partialData": false,
"includeHierarchy": false,
})
}
var body *bytes.Reader
var path string
if !includeChildren {
values := []map[string]any{{
"items": _items,
}}
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetails"
} else {
values := _items
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetailsInFolders"
}
opts := rest.Opts{
Method: "POST",
Path: path,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items, resp, err
}
// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "false")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "true")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("document_id", id)
values.Set("unified_format", "false") // important
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + defaultZone + "/list/lookup_by_id",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
var item *Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
opts := rest.Opts{
Method: "GET",
Path: "/v1/item/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var item *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
values := url.Values{}
values.Set("limit", strconv.FormatInt(limit, 10))
opts := rest.Opts{
Method: "GET",
Path: "/v1/enumerate/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
items := struct {
Items []*DriveItemRaw `json:"drive_item"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items.Items, resp, err
}
// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
_, zone, docid := DeconstructDriveID(id)
values := url.Values{}
values.Set("document_id", docid)
if zone == "" {
zone = defaultZone
}
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + zone + "/download/by_id",
Parameters: values,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var filer *FileRequest
resp, err := d.icloud.Request(ctx, opts, nil, &filer)
if err != nil {
return "", resp, err
}
var url string
if filer.DataToken != nil {
url = filer.DataToken.URL
} else {
url = filer.PackageToken.URL
}
return url, resp, err
}
// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
opts := &rest.Opts{
Method: "GET",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: url,
Options: opt,
}
resp, err := d.icloud.srv.Call(ctx, opts)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}
// MoveItemToTrashByID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": drivewsid,
"etag": etag,
"clientId": drivewsid,
}}}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItemsToTrash",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
item := struct {
Items []*DriveItem `json:"items"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
if item.Items[0].Status != statusOk {
// rerun with latest etag
if force && item.Items[0].Status == "ETAG_CONFLICT" {
return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
}
err = newRequestError(item.Items[0].Status, "unknown request status")
}
return item.Items[0], resp, err
}
// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}
// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": drivewsid,
"folders": []map[string]any{{
"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
"name": name,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/createFolders",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var fResp *CreateFoldersResponse
resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
if err != nil {
return nil, resp, err
}
status := fResp.Folders[0].Status
if status != statusOk {
err = newRequestError(status, "unknown request status")
}
return fResp.Folders[0], resp, err
}
// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}
// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": id,
"name": name,
"etag": etag,
// "extension": split[1],
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/renameItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
docSrc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
docDst, resp, err := d.GetDocByItemID(ctx, dstID)
if err != nil {
return nil, resp, err
}
return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}
// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }
// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": dstID,
"items": []map[string]any{{
"drivewsid": id,
"etag": etag,
"clientId": id,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
// putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
values := map[string]any{
"info_to_update": map[string]any{},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/v1/item/copy/" + itemID,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var info *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &info)
if err != nil {
return nil, resp, err
}
return info, resp, err
}
// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
// first we need to request an upload url
values := map[string]any{
"filename": name,
"type": "FILE",
"size": strconv.FormatInt(size, 10),
"content_type": GetContentTypeForFile(name),
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/upload/web",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo []*UploadResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
return responseInfo[0], resp, err
}
// Upload uploads a file to the given url
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
// TODO: implement multipart upload
opts := rest.Opts{
Method: "POST",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: uploadURL,
Body: in,
ContentLength: &size,
ContentType: GetContentTypeForFile(name),
// MultipartContentName: "files",
MultipartFileName: name,
}
var singleFileResponse *SingleFileResponse
resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
if err != nil {
return nil, resp, err
}
return singleFileResponse, resp, err
}
// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
body, err := IntoReader(r)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/update/documents",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo *DocumentUpdateResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
doc := responseInfo.Results[0].Document
item := DriveItem{
Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID,
Docwsid: doc.DocumentID,
Itemid: doc.ItemID,
Etag: doc.Etag,
ParentID: doc.ParentID,
DateModified: time.Unix(r.Mtime, 0),
DateCreated: time.Unix(r.Mtime, 0),
Type: doc.Type,
Name: doc.Name,
Size: doc.Size,
}
return &item, resp, err
}
// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
AllowConflict bool `json:"allow_conflict"`
Btime int64 `json:"btime"`
Command string `json:"command"`
CreateShortGUID bool `json:"create_short_guid"`
Data struct {
Receipt string `json:"receipt,omitempty"`
ReferenceSignature string `json:"reference_signature,omitempty"`
Signature string `json:"signature,omitempty"`
Size int64 `json:"size,omitempty"`
WrappingKey string `json:"wrapping_key,omitempty"`
} `json:"data,omitempty"`
DocumentID string `json:"document_id"`
FileFlags FileFlags `json:"file_flags"`
Mtime int64 `json:"mtime"`
Path struct {
Path string `json:"path"`
StartingDocumentID string `json:"starting_document_id"`
} `json:"path"`
}
// FileFlags defines the file flags for a document.
type FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsHidden bool `json:"is_hidden"`
IsWritable bool `json:"is_writable"`
}
// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
return UpdateFileInfo{
Command: "add_file",
CreateShortGUID: true,
AllowConflict: true,
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: false,
},
}
}
// DriveItemRaw is a raw drive item.
// not sure what to call this, but there seems to be a "unified" and non-"unified" drive item response. This is the non-unified one.
type DriveItemRaw struct {
ItemID string `json:"item_id"`
ItemInfo *DriveItemRawInfo `json:"item_info"`
}
// SplitName splits the name of a DriveItemRaw into its name and extension.
//
// It returns the name and extension as separate strings. If the name ends with a dot,
// it means there is no extension, so an empty string is returned for the extension.
// If the name does not contain a dot, it likewise has no extension and an empty string is returned.
func (d *DriveItemRaw) SplitName() (string, string) {
name := d.ItemInfo.Name
// ends with a dot, no extension
if strings.HasSuffix(name, ".") {
return name, ""
}
lastInd := strings.LastIndex(name, ".")
if lastInd == -1 {
return name, ""
}
return name[:lastInd], name[lastInd+1:]
}
// ModTime returns the modification time of the DriveItemRaw.
//
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the modification time of the DriveItemRaw.
func (d *DriveItemRaw) ModTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// CreatedTime returns the creation time of the DriveItemRaw.
//
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the creation time of the DriveItemRaw.
func (d *DriveItemRaw) CreatedTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// DriveItemRawInfo is the raw information about a drive item.
type DriveItemRawInfo struct {
Name string `json:"name"`
// Extension is absolutely borked on endpoints so don't use it.
Extension string `json:"extension"`
Size int64 `json:"size,string"`
Type string `json:"type"`
Version string `json:"version"`
ModifiedAt string `json:"modified_at"`
CreatedAt string `json:"created_at"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
//
// It takes no parameters.
// It returns a pointer to a DriveItem.
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
name, extension := d.SplitName()
return &DriveItem{
Itemid: d.ItemID,
Name: name,
Extension: extension,
Type: d.ItemInfo.Type,
Etag: d.ItemInfo.Version,
DateModified: d.ModTime(),
DateCreated: d.CreatedTime(),
Size: d.ItemInfo.Size,
Urls: d.ItemInfo.Urls,
}
}
// DocumentUpdateResponse is the response of a document update request.
type DocumentUpdateResponse struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
Results []struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
OperationID interface{} `json:"operation_id"`
Document *Document `json:"document"`
} `json:"results"`
}
// Document represents a document on iCloud.
type Document struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
Etag string `json:"etag"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Type string `json:"type"`
Deleted bool `json:"deleted"`
Mtime int64 `json:"mtime"`
LastEditorName string `json:"last_editor_name"`
Data DocumentData `json:"data"`
Size int64 `json:"size"`
Btime int64 `json:"btime"`
Zone string `json:"zone"`
FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsWritable bool `json:"is_writable"`
IsHidden bool `json:"is_hidden"`
} `json:"file_flags"`
LastOpenedTime int64 `json:"lastOpenedTime"`
RestorePath interface{} `json:"restorePath"`
HasChainedParent bool `json:"hasChainedParent"`
}
// DriveID returns the drive ID of the Document.
func (d *Document) DriveID() string {
if d.Zone == "" {
d.Zone = defaultZone
}
return d.Type + "::" + d.Zone + "::" + d.DocumentID
}
// DocumentData represents the data of a document.
type DocumentData struct {
Signature string `json:"signature"`
Owner string `json:"owner"`
Size int64 `json:"size"`
ReferenceSignature string `json:"reference_signature"`
WrappingKey string `json:"wrapping_key"`
PcsInfo string `json:"pcsInfo"`
}
// SingleFileResponse is the response of a single file request.
type SingleFileResponse struct {
SingleFile *SingleFileInfo `json:"singleFile"`
}
// SingleFileInfo represents the information of a single file.
type SingleFileInfo struct {
ReferenceSignature string `json:"referenceChecksum"`
Size int64 `json:"size"`
Signature string `json:"fileChecksum"`
WrappingKey string `json:"wrappingKey"`
Receipt string `json:"receipt"`
}
// UploadResponse is the response of an upload request.
type UploadResponse struct {
URL string `json:"url"`
DocumentID string `json:"document_id"`
}
// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
URL string `json:"url"`
Token string `json:"token"`
Signature string `json:"signature"`
WrappingKey string `json:"wrapping_key"`
ReferenceSignature string `json:"reference_signature"`
}
// FileRequest represents the request of a file.
type FileRequest struct {
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
OwnerDsid int64 `json:"owner_dsid"`
DataToken *FileRequestToken `json:"data_token,omitempty"`
PackageToken *FileRequestToken `json:"package_token,omitempty"`
DoubleEtag string `json:"double_etag"`
}
// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
Folders []*DriveItem `json:"folders"`
}
// DriveItem represents an item on iCloud.
type DriveItem struct {
DateCreated time.Time `json:"dateCreated"`
Drivewsid string `json:"drivewsid"`
Docwsid string `json:"docwsid"`
Itemid string `json:"item_id"`
Zone string `json:"zone"`
Name string `json:"name"`
ParentID string `json:"parentId"`
Hierarchy []DriveItem `json:"hierarchy"`
Etag string `json:"etag"`
Type string `json:"type"`
AssetQuota int64 `json:"assetQuota"`
FileCount int64 `json:"fileCount"`
ShareCount int64 `json:"shareCount"`
ShareAliasCount int64 `json:"shareAliasCount"`
DirectChildrenCount int64 `json:"directChildrenCount"`
Items []*DriveItem `json:"items"`
NumberOfItems int64 `json:"numberOfItems"`
Status string `json:"status"`
Extension string `json:"extension,omitempty"`
DateModified time.Time `json:"dateModified,omitempty"`
DateChanged time.Time `json:"dateChanged,omitempty"`
Size int64 `json:"size,omitempty"`
LastOpenTime time.Time `json:"lastOpenTime,omitempty"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}
// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
return d.Urls.URLDownload
}
// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
if d.Extension != "" {
return d.Name + "." + d.Extension
}
return d.Name
}
// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
split := strings.Split(id, "::")
return split[len(split)-1]
}
// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
split := strings.Split(id, "::")
if len(split) < 3 {
return "", "", id
}
return split[0], split[1], split[2]
}
// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
return strings.Join([]string{t, zone, id}, "::")
}
// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
// detect MIME type by looking at the filename only
mimeType := mime.TypeByExtension(filepath.Ext(name))
if mimeType == "" {
// api requires a mime type passed in
mimeType = "text/plain"
}
return strings.Split(mimeType, ";")[0]
}


@@ -1,412 +0,0 @@
package api
import (
"context"
"fmt"
"net/http"
"net/url"
"slices"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
// Session represents an iCloud session
type Session struct {
SessionToken string `json:"session_token"`
Scnt string `json:"scnt"`
SessionID string `json:"session_id"`
AccountCountry string `json:"account_country"`
TrustToken string `json:"trust_token"`
ClientID string `json:"client_id"`
Cookies []*http.Cookie `json:"cookies"`
AccountInfo AccountInfo `json:"account_info"`
srv *rest.Client `json:"-"`
}
// String returns the session as a string
// func (s *Session) String() string {
// jsession, _ := json.Marshal(s)
// return string(jsession)
// }
// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
if err != nil {
return resp, err
}
if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
s.AccountCountry = val
}
if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
s.SessionID = val
}
if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
s.SessionToken = val
}
if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
s.TrustToken = val
}
if val := resp.Header.Get("scnt"); val != "" {
s.Scnt = val
}
return resp, nil
}
// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}
// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
trustTokens := []string{}
if s.TrustToken != "" {
trustTokens = []string{s.TrustToken}
}
values := map[string]any{
"accountName": appleID,
"password": password,
"rememberMe": true,
"trustTokens": trustTokens,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/signin",
Parameters: url.Values{},
ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
RootURL: authEndpoint,
IgnoreStatus: true, // need to handle 409 for hsa2
NoResponse: true,
Body: body,
}
opts.Parameters.Set("isRememberMeEnabled", "true")
_, err = s.Request(ctx, opts, nil, nil)
return err
}
// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
values := map[string]any{
"accountCountryCode": s.AccountCountry,
"dsWebAuthToken": s.SessionToken,
"extended_login": true,
"trustToken": s.TrustToken,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/accountLogin",
ExtraHeaders: GetCommonHeaders(map[string]string{}),
RootURL: setupEndpoint,
Body: body,
}
resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err == nil {
s.Cookies = resp.Cookies()
}
return err
}
// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
body, err := IntoReader(values)
if err != nil {
return err
}
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "POST",
Path: "/verify/trusteddevice/securitycode",
ExtraHeaders: headers,
RootURL: authEndpoint,
Body: body,
NoResponse: true,
}
_, err = s.Request(ctx, opts, nil, nil)
if err == nil {
if err := s.TrustSession(ctx); err != nil {
return err
}
return nil
}
return fmt.Errorf("validate2FACode failed: %w", err)
}
// TrustSession trusts the session
func (s *Session) TrustSession(ctx context.Context) error {
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "GET",
Path: "/2sv/trust",
ExtraHeaders: headers,
RootURL: authEndpoint,
NoResponse: true,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, nil)
if err != nil {
return fmt.Errorf("trustSession failed: %w", err)
}
return s.AuthWithToken(ctx)
}
// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "/validate",
ExtraHeaders: s.GetHeaders(map[string]string{}),
RootURL: setupEndpoint,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err != nil {
return fmt.Errorf("validateSession failed: %w", err)
}
return nil
}
// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Accept": "application/json",
"Content-Type": "application/json",
"X-Apple-OAuth-Client-Id": s.ClientID,
"X-Apple-OAuth-Client-Type": "firstPartyAuth",
"X-Apple-OAuth-Redirect-URI": "https://www.icloud.com",
"X-Apple-OAuth-Require-Grant-Code": "true",
"X-Apple-OAuth-Response-Mode": "web_message",
"X-Apple-OAuth-Response-Type": "code",
"X-Apple-OAuth-State": s.ClientID,
"X-Apple-Widget-Key": s.ClientID,
"Origin": homeEndpoint,
"Referer": fmt.Sprintf("%s/", homeEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// GetHeaders Gets the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
headers := GetCommonHeaders(map[string]string{})
headers["Cookie"] = s.GetCookieString()
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
cookieHeader := ""
// we only care about name and value.
for _, cookie := range s.Cookies {
cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
}
return cookieHeader
}
// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Content-Type": "application/json",
"Origin": baseEndpoint,
"Referer": fmt.Sprintf("%s/", baseEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
var hashes []string
for _, cookie := range right {
hashes = append(hashes, cookie.Raw)
}
for _, cookie := range left {
if !slices.Contains(hashes, cookie.Raw) {
right = append(right, cookie)
}
}
return right, nil
}
// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
var domainCookies []*http.Cookie
for _, cookie := range cookies {
if strings.HasSuffix(url.Host, cookie.Domain) {
domainCookies = append(domainCookies, cookie)
}
}
return domainCookies, nil
}
// NewSession creates a new Session instance with default values.
func NewSession() *Session {
session := &Session{}
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
//session.ClientID = "auth-" + uuid.New().String()
return session
}
// AccountInfo represents account info
type AccountInfo struct {
DsInfo *ValidateDataDsInfo `json:"dsInfo"`
HasMinimumDeviceForPhotosWeb bool `json:"hasMinimumDeviceForPhotosWeb"`
ICDPEnabled bool `json:"iCDPEnabled"`
Webservices map[string]*webService `json:"webservices"`
PcsEnabled bool `json:"pcsEnabled"`
TermsUpdateNeeded bool `json:"termsUpdateNeeded"`
ConfigBag struct {
Urls struct {
AccountCreateUI string `json:"accountCreateUI"`
AccountLoginUI string `json:"accountLoginUI"`
AccountLogin string `json:"accountLogin"`
AccountRepairUI string `json:"accountRepairUI"`
DownloadICloudTerms string `json:"downloadICloudTerms"`
RepairDone string `json:"repairDone"`
AccountAuthorizeUI string `json:"accountAuthorizeUI"`
VettingURLForEmail string `json:"vettingUrlForEmail"`
AccountCreate string `json:"accountCreate"`
GetICloudTerms string `json:"getICloudTerms"`
VettingURLForPhone string `json:"vettingUrlForPhone"`
} `json:"urls"`
AccountCreateEnabled bool `json:"accountCreateEnabled"`
} `json:"configBag"`
HsaTrustedBrowser bool `json:"hsaTrustedBrowser"`
AppsOrder []string `json:"appsOrder"`
Version int `json:"version"`
IsExtendedLogin bool `json:"isExtendedLogin"`
PcsServiceIdentitiesIncluded bool `json:"pcsServiceIdentitiesIncluded"`
IsRepairNeeded bool `json:"isRepairNeeded"`
HsaChallengeRequired bool `json:"hsaChallengeRequired"`
RequestInfo struct {
Country string `json:"country"`
TimeZone string `json:"timeZone"`
Region string `json:"region"`
} `json:"requestInfo"`
PcsDeleted bool `json:"pcsDeleted"`
ICloudInfo struct {
SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
} `json:"iCloudInfo"`
Apps map[string]*ValidateDataApp `json:"apps"`
}
// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
HsaVersion int `json:"hsaVersion"`
LastName string `json:"lastName"`
ICDPEnabled bool `json:"iCDPEnabled"`
TantorMigrated bool `json:"tantorMigrated"`
Dsid string `json:"dsid"`
HsaEnabled bool `json:"hsaEnabled"`
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
IroncadeMigrated bool `json:"ironcadeMigrated"`
Locale string `json:"locale"`
BrZoneConsolidated bool `json:"brZoneConsolidated"`
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
IsManagedAppleID bool `json:"isManagedAppleID"`
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
Gilligvited bool `json:"gilligvited"`
AppleIDAliases []interface{} `json:"appleIdAliases"`
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
IsPaidDeveloper bool `json:"isPaidDeveloper"`
CountryCode string `json:"countryCode"`
NotificationID string `json:"notificationId"`
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
ADsID string `json:"aDsID"`
Locked bool `json:"locked"`
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
PrimaryEmail string `json:"primaryEmail"`
AppleIDEntries []struct {
IsPrimary bool `json:"isPrimary"`
Type string `json:"type"`
Value string `json:"value"`
} `json:"appleIdEntries"`
GilliganEnabled bool `json:"gilligan-enabled"`
IsWebAccessAllowed bool `json:"isWebAccessAllowed"`
FullName string `json:"fullName"`
MailFlags struct {
IsThreadingAvailable bool `json:"isThreadingAvailable"`
IsSearchV2Provisioned bool `json:"isSearchV2Provisioned"`
SCKMail bool `json:"sCKMail"`
IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
} `json:"mailFlags"`
LanguageCode string `json:"languageCode"`
AppleID string `json:"appleId"`
HasUnreleasedOS bool `json:"hasUnreleasedOS"`
AnalyticsOptInStatus bool `json:"analyticsOptInStatus"`
FirstName string `json:"firstName"`
ICloudAppleIDAlias string `json:"iCloudAppleIdAlias"`
NotesMigrated bool `json:"notesMigrated"`
BeneficiaryInfo struct {
IsBeneficiary bool `json:"isBeneficiary"`
} `json:"beneficiaryInfo"`
HasPaymentInfo bool `json:"hasPaymentInfo"`
PcsDelet bool `json:"pcsDelet"`
AppleIDAlias string `json:"appleIdAlias"`
BrMigrated bool `json:"brMigrated"`
StatusCode int `json:"statusCode"`
FamilyEligible bool `json:"familyEligible"`
}
// ValidateDataApp represents an app
type ValidateDataApp struct {
CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
IsQualifiedForBeta bool `json:"isQualifiedForBeta"`
}
// webService represents a web service
type webService struct {
PcsRequired bool `json:"pcsRequired"`
URL string `json:"url"`
UploadURL string `json:"uploadUrl"`
Status string `json:"status"`
}
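
Since the response types above are plain JSON mappings, here is a trimmed-down sketch of decoding one; the struct mirrors a few of the tags above and the payload is invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// accountInfo mirrors a few of the JSON tags from AccountInfo above.
type accountInfo struct {
	DsInfo struct {
		Dsid       string `json:"dsid"`
		FullName   string `json:"fullName"`
		HsaVersion int    `json:"hsaVersion"`
	} `json:"dsInfo"`
	HsaChallengeRequired bool `json:"hsaChallengeRequired"`
}

func main() {
	// Invented sample payload, for illustration only.
	data := []byte(`{"dsInfo":{"dsid":"123","fullName":"Jane Doe","hsaVersion":2},"hsaChallengeRequired":false}`)
	var info accountInfo
	if err := json.Unmarshal(data, &info); err != nil {
		panic(err)
	}
	fmt.Printf("%s (dsid %s), challenge required: %v\n",
		info.DsInfo.FullName, info.DsInfo.Dsid, info.HsaChallengeRequired)
}
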

File diff suppressed because it is too large


@ -1,18 +0,0 @@
//go:build !plan9 && !solaris
package iclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/iclouddrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestICloudDrive:",
NilObject: (*iclouddrive.Object)(nil),
})
}


@ -1,7 +0,0 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris
// Package iclouddrive implements the iCloud Drive backend
package iclouddrive


@ -277,9 +277,11 @@ machines.`)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -290,9 +292,11 @@ machines.`)
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -303,9 +307,11 @@ machines.`)
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -316,9 +322,11 @@ machines.`)
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauthutil.Config{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = defaultClientID
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.TokenURL = tokenURL
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.AuthURL = tokenURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
clientID, ok := m.Get(configClientID)
@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.TokenURL = legacyTokenURL
oauthConfig.AuthURL = legacyTokenURL
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {

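The jottacloud hunks above shuttle the same endpoint data between two config shapes. A minimal side-by-side sketch, assuming the flattened oauthutil.Config field names shown on one side of the diff (all URLs and IDs are placeholders):

package main

import (
	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

// Flattened shape: endpoint URLs sit directly on the struct, as in the
// oauthutil.Config lines of the diff. Field set assumed from those lines.
var wrapped = &oauthutil.Config{
	ClientID: "client-id",
	Scopes:   []string{"openid", "jotta-default", "offline_access"},
	AuthURL:  "https://example.com/auth",
	TokenURL: "https://example.com/token",
}

// Stock golang.org/x/oauth2 shape: the same URLs move into a nested
// oauth2.Endpoint value.
var std = &oauth2.Config{
	ClientID: "client-id",
	Scopes:   []string{"openid", "jotta-default", "offline_access"},
	Endpoint: oauth2.Endpoint{
		AuthURL:  "https://example.com/auth",
		TokenURL: "https://example.com/token",
	},
}

func main() {
	_, _ = wrapped, std
}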

@ -1,16 +0,0 @@
//go:build windows || plan9 || js || linux
package local
import "os"
const haveLChmod = false
// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
// Can't do this safely on this OS - chmoding a symlink always
// changes the destination.
return nil
}


@ -1,41 +0,0 @@
//go:build !windows && !plan9 && !js && !linux
package local
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
const haveLChmod = true
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= syscall.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= syscall.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= syscall.S_ISVTX
}
return o
}
// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
// and returns ENOTSUP if you try, so we don't support this on linux
if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lChmod", Path: name, Err: e}
}
return nil
}
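
A short usage sketch of the same Fchmodat call on a throwaway symlink; the paths are illustrative, and the build constraint matches the file above (Linux is excluded because fchmodat rejects AT_SYMLINK_NOFOLLOW there):

//go:build !windows && !plan9 && !js && !linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	target := "/tmp/lchmod-target"
	link := "/tmp/lchmod-link"
	if err := os.WriteFile(target, []byte("x"), 0o644); err != nil {
		panic(err)
	}
	_ = os.Remove(link)
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}
	// Change the mode of the link itself, not of the file it points to.
	if err := unix.Fchmodat(unix.AT_FDCWD, link, 0o600, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		panic(err)
	}
	fi, _ := os.Lstat(link)
	fmt.Printf("link mode now %v\n", fi.Mode().Perm())
}
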


@ -1,4 +1,4 @@
//go:build plan9 || js
//go:build windows || plan9 || js
package local


@ -1,19 +0,0 @@
//go:build windows
package local
import (
"time"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
return setTimes(name, atime, mtime, time.Time{}, true)
}


@ -34,6 +34,7 @@ import (
// Constants
const (
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
@ -100,8 +101,10 @@ Metadata is supported on files and directories.
},
{
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
},
{
@ -376,22 +379,17 @@ type Directory struct {
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Override --local-links with --links if set
if ci.Links {
opt.TranslateSymlinks = true
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
@ -437,9 +435,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
@ -510,8 +508,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
}
@ -694,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above

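The link translation used throughout the local backend changes above is plain string manipulation; a self-contained sketch of the round trip:

package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink"

// translateLink mirrors the helper above: strip the suffix from the local
// path and report whether the remote referred to a translated symlink.
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
	return newLocalPath, isTranslatedLink
}

func main() {
	p, isLink := translateLink("docs/readme.txt"+linkSuffix, "/data/docs/readme.txt"+linkSuffix)
	fmt.Println(p, isLink) // /data/docs/readme.txt true
}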

@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
// Check that NewObject doesn't see the non suffixed version
@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
@ -268,66 +268,22 @@ func TestMetadata(t *testing.T) {
r := fstest.NewRun(t)
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
whenRFC := when.Format(time.RFC3339Nano)
r.WriteFile(filePath, "metadata file contents", when)
f := r.Flocal.(*Fs)
// Set fs into "-l" / "--links" mode
f.opt.TranslateSymlinks = true
// Write a symlink to the file
symlinkPath := "metafile-link.txt"
osSymlinkPath := filepath.Join(f.root, symlinkPath)
symlinkPath += fs.LinkSuffix
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
// Get the object
obj, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
o := obj.(*Object)
// Get the symlink object
symlinkObj, err := f.NewObject(ctx, symlinkPath)
require.NoError(t, err)
symlinkO := symlinkObj.(*Object)
// Record metadata for o
oMeta, err := o.Metadata(ctx)
require.NoError(t, err)
// Test symlink first to check it doesn't mess up file
t.Run("Symlink", func(t *testing.T) {
testMetadata(t, r, symlinkO, symlinkModTime)
})
// Read it again
oMetaNew, err := o.Metadata(ctx)
require.NoError(t, err)
// Check that operating on the symlink didn't change the file it was pointing to
// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
// Now run the same tests on the file
t.Run("File", func(t *testing.T) {
testMetadata(t, r, o, when)
})
}
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)
features := f.Features()
var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
var hasXID, hasAtime, hasBtime bool
switch runtime.GOOS {
case "darwin", "freebsd", "netbsd", "linux":
hasXID, hasAtime, hasBtime = true, true, true
canSetXattrOnLinks = runtime.GOOS != "linux"
case "openbsd", "solaris":
hasXID, hasAtime = true, true
case "windows":
@ -350,10 +306,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
require.NoError(t, err)
assert.Nil(t, m)
if !canSetXattrOnLinks && o.translatedLink {
t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
}
inM := fs.Metadata{
"potato": "chips",
"cabbage": "soup",
@ -368,21 +320,18 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
})
checkTime := func(m fs.Metadata, key string, when time.Time) {
t.Helper()
mt, ok := o.parseMetadataTime(m, key)
assert.True(t, ok)
dt := mt.Sub(when)
precision := time.Second
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
}
checkInt := func(m fs.Metadata, key string, base int) int {
t.Helper()
value, ok := o.parseMetadataInt(m, key, base)
assert.True(t, ok)
return value
}
t.Run("Read", func(t *testing.T) {
m, err := o.Metadata(ctx)
require.NoError(t, err)
@ -392,12 +341,13 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
checkInt(m, "mode", 8)
checkTime(m, "mtime", when)
assert.Equal(t, len(whenRFC), len(m["mtime"]))
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
if hasAtime && !o.translatedLink { // symlinks generally don't record atime
if hasAtime {
checkTime(m, "atime", when)
}
if hasBtime && !o.translatedLink { // symlinks generally don't record btime
if hasBtime {
checkTime(m, "btime", when)
}
if hasXID {
@ -421,10 +371,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
"mode": "0767",
"potato": "wedges",
}
if !canSetXattrOnLinks && o.translatedLink {
// Don't change xattr if not supported on symlinks
delete(newM, "potato")
}
err := o.writeMetadata(newM)
require.NoError(t, err)
@ -434,11 +380,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
mode := checkInt(m, "mode", 8)
if runtime.GOOS != "windows" {
expectedMode := 0767
if o.translatedLink && runtime.GOOS == "linux" {
expectedMode = 0777 // perms of symlinks always read as 0777 on linux
}
assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
}
checkTime(m, "mtime", newMtime)
@ -448,7 +390,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
if haveSetBTime {
checkTime(m, "btime", newBtime)
}
if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
if xattrSupported {
assert.Equal(t, "wedges", m["potato"])
}
})


@ -105,11 +105,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
if haveSetBTime {
if btimeOK {
if o.translatedLink {
err = lsetBTime(o.path, btime)
} else {
err = setBTime(o.path, btime)
}
err = setBTime(o.path, btime)
if err != nil {
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
}
@ -125,11 +121,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
} else {
if o.translatedLink {
err = os.Lchown(o.path, uid, gid)
} else {
err = os.Chown(o.path, uid, gid)
}
err = os.Chown(o.path, uid, gid)
if err != nil {
outErr = fmt.Errorf("failed to change ownership: %w", err)
}
@ -140,16 +132,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
if mode >= 0 {
umode := uint(mode)
if umode <= math.MaxUint32 {
if o.translatedLink {
if haveLChmod {
err = lChmod(o.path, os.FileMode(umode))
} else {
fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
err = nil
}
} else {
err = os.Chmod(o.path, os.FileMode(umode))
}
err = os.Chmod(o.path, os.FileMode(umode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}


@ -13,9 +13,3 @@ func setBTime(name string, btime time.Time) error {
// Does nothing
return nil
}
// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
// Does nothing
return nil
}


@ -9,20 +9,15 @@ import (
const haveSetBTime = true
// setTimes sets any of atime, mtime or btime
// if link is set it sets a link rather than the target
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
pathp, err := syscall.UTF16PtrFromString(name)
if err != nil {
return err
}
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
if link {
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
}
h, err := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, fileFlag, 0)
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return err
}
@ -32,28 +27,6 @@ func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error)
err = closeErr
}
}()
var patime, pmtime, pbtime *syscall.Filetime
if !atime.IsZero() {
t := syscall.NsecToFiletime(atime.UnixNano())
patime = &t
}
if !mtime.IsZero() {
t := syscall.NsecToFiletime(mtime.UnixNano())
pmtime = &t
}
if !btime.IsZero() {
t := syscall.NsecToFiletime(btime.UnixNano())
pbtime = &t
}
return syscall.SetFileTime(h, pbtime, patime, pmtime)
}
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
return setTimes(name, time.Time{}, time.Time{}, btime, false)
}
// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
return setTimes(name, time.Time{}, time.Time{}, btime, true)
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
return syscall.SetFileTime(h, &bFileTime, nil, nil)
}
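
A hedged sketch of the Filetime plumbing used above: a zero time.Time becomes a nil pointer, so syscall.SetFileTime leaves that timestamp untouched (no file handle is opened here, so the final call is only indicated in a comment):

//go:build windows

package main

import (
	"fmt"
	"syscall"
	"time"
)

// toFiletime converts a time.Time into the *syscall.Filetime that
// syscall.SetFileTime expects, mapping the zero value to nil so that
// timestamp is left unchanged.
func toFiletime(t time.Time) *syscall.Filetime {
	if t.IsZero() {
		return nil
	}
	ft := syscall.NsecToFiletime(t.UnixNano())
	return &ft
}

func main() {
	btime := toFiletime(time.Now())  // set the birth time
	atime := toFiletime(time.Time{}) // nil: leave access time alone
	mtime := toFiletime(time.Time{}) // nil: leave write time alone
	// With a handle h from syscall.CreateFile, the call would then be:
	//   syscall.SetFileTime(h, btime, atime, mtime)
	fmt.Println(btime != nil, atime == nil, mtime == nil)
}
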


@ -68,12 +68,14 @@ var (
)
// Description of how to authorize
var oauthConfig = &oauthutil.Config{
var oauthConfig = &oauth2.Config{
ClientID: api.OAuthClientID,
ClientSecret: "",
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
Endpoint: oauth2.Endpoint{
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
},
}
// Register with Fs
@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(ctx, f.cli)
oauth2Conf := oauthConfig.MakeOauth2Config()
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
err = errors.New("invalid token")

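For reference, a minimal sketch of the resource-owner password flow invoked above via golang.org/x/oauth2; the endpoint and credentials are placeholders, so the exchange naturally fails against them at runtime:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "client-id",
		Endpoint: oauth2.Endpoint{
			AuthURL:   "https://example.com/oauth",
			TokenURL:  "https://example.com/oauth",
			AuthStyle: oauth2.AuthStyleInParams,
		},
	}
	// Exchange a username and password directly for a token.
	tok, err := conf.PasswordCredentialsToken(context.Background(), "user", "secret")
	if err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	fmt.Println("token expires:", tok.Expiry)
}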

@ -202,14 +202,9 @@ type SharingLinkType struct {
type LinkType string
const (
// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
ViewLinkType LinkType = "view"
// EditLinkType (role: write) An edit sharing link, allowing read-write access.
EditLinkType LinkType = "edit"
// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
// content into a host webpage. Embed links are not available for OneDrive for
// Business or SharePoint.
EmbedLinkType LinkType = "embed"
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
)
// LinkScope represents the scope of the link represented by this permission.
@ -217,12 +212,9 @@ const (
type LinkScope string
const (
// AnonymousScope = Anyone with the link has access, without needing to sign in.
// This may include people outside of your organization.
AnonymousScope LinkScope = "anonymous"
// OrganizationScope = Anyone signed into your organization (tenant) can use the
// link to get access. Only available in OneDrive for Business and SharePoint.
OrganizationScope LinkScope = "organization"
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
)
// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
@ -244,14 +236,10 @@ type PermissionsType struct {
type Role string
const (
// ReadRole provides the ability to read the metadata and contents of the item.
ReadRole Role = "read"
// WriteRole provides the ability to read and modify the metadata and contents of the item.
WriteRole Role = "write"
// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
OwnerRole Role = "owner"
// MemberRole represents the member role for SharePoint and OneDrive for Business.
MemberRole Role = "member"
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
)
// PermissionsResponse is the response to the list permissions method


@ -40,6 +40,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@ -64,21 +65,14 @@ const (
// Globals
var (
// Define the paths used for token operations
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
authPath = "/oauth2/v2.0/authorize"
tokenPath = "/oauth2/v2.0/token"
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// When using client credential OAuth flow, scope of .default is required in order
// to use the permissions configured for the application within the tenant
scopeAccessClientCred = fs.SpaceSepList{".default"}
// Base config for how to auth
oauthConfig = &oauthutil.Config{
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: scopeAccess,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@ -189,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
},
},
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.
Set this if using
- Client Credential flow
`,
Sensitive: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
@ -541,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
})
}
// Make the oauth config for the backend
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
// Copy the default oauthConfig
oauthConfig := *oauthConfig
// Set the scopes
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
// Construct the auth URLs
prefix := commonPathPrefix
if opt.Tenant != "" {
prefix = "/" + opt.Tenant
}
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
// Check to see if we are using client credentials flow
if opt.ClientCredentials {
// Override scope to .default
oauthConfig.Scopes = scopeAccessClientCred
if opt.Tenant == "" {
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
}
}
return &oauthConfig, nil
}
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
_, graphURL := getRegionURL(m)
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
// Check to see if this is the start of the state machine execution
if conf.State == "" {
conf, err := makeOauthConfig(ctx, opt)
if config.State == "" {
var accessScopes fs.SpaceSepList
accessScopesString, _ := m.Get("access_scopes")
err := accessScopes.Set(accessScopesString)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
}
oauthConfig.Scopes = []string(accessScopes)
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: conf,
OAuth2Config: oauthConfig,
})
}
@ -596,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
if err != nil {
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
// Create a REST client, build on the OAuth client created above
srv := rest.NewClient(oAuthClient)
switch conf.State {
switch config.State {
case "choose_type":
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
@ -626,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(conf.Result)
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
@ -644,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
},
})
case "driveid":
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
if err != nil {
return out, err
}
// Default the drive_id to the previous version in the config
out.Option.Default, _ = m.Get("drive_id")
return out, nil
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: conf.Result,
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
@ -670,7 +622,7 @@ Examples:
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := conf.Result
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
@ -685,12 +637,12 @@ Examples:
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: conf.Result,
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := conf.Result
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
@ -712,10 +664,10 @@ Examples:
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "driveid_final":
finalDriveID := conf.Result
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
@ -734,12 +686,12 @@ Examples:
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if conf.Result == "true" {
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", conf.State)
return nil, fmt.Errorf("unknown state %q", config.State)
}
// Options defines the configuration for this backend
@ -750,9 +702,7 @@ type Options struct {
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ClientCredentials bool `config:"client_credentials"`
AccessScopes fs.SpaceSepList `config:"access_scopes"`
Tenant string `config:"tenant"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@ -877,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry = true
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
}
case 429, 503: // Too Many Requests, Server Too Busy
case 429: // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
retryAfter, parseErr := strconv.Atoi(values[0])
@ -1040,10 +990,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
oauthConfig, err := makeOauthConfig(ctx, opt)
if err != nil {
return nil, err
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
}
client := fshttp.NewClient(ctx)
@ -1656,7 +1609,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@ -1671,18 +1624,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
@ -2610,11 +2556,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("can't upload content to a OneNote file")
}
// Only start the renewer if we have a valid one
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()

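One detail worth isolating from the onedrive changes above is the throttling branch that honours Retry-After as integer seconds. A standalone sketch of that pattern with an invented response:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter returns how long to wait when the server throttles us,
// falling back to a default when the header is absent or malformed.
func retryAfter(resp *http.Response, fallback time.Duration) time.Duration {
	if resp.StatusCode == 429 || resp.StatusCode == 503 {
		if v := resp.Header.Get("Retry-After"); v != "" {
			if secs, err := strconv.Atoi(v); err == nil {
				return time.Duration(secs) * time.Second
			}
		}
	}
	return fallback
}

func main() {
	resp := &http.Response{
		StatusCode: 429,
		Header:     http.Header{"Retry-After": []string{"7"}},
	}
	fmt.Println(retryAfter(resp, 5*time.Second)) // 7s
}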

@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
compareDirMeta(expectedMeta, actualMeta, false)
// modtime
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
// try changing it and re-check it
newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
assert.NoError(t, err)
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
// ensure that f.DirSetModTime also works
err = f.DirSetModTime(ctx, "subdir", t3)
assert.NoError(t, err)
@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
assert.NoError(t, err)
entries.ForDir(func(dir fs.Directory) {
if dir.Remote() == "subdir" {
fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
}
})


@ -106,9 +106,9 @@ func newOptions() []fs.Option {
Sensitive: true,
}, {
Name: "compartment",
Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: false,
Required: true,
Sensitive: true,
}, {
Name: "region",


@ -48,10 +48,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -59,8 +61,8 @@ var (
)
// Update the TokenURL with the actual hostname
func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
}
// Register with Fs
@ -77,7 +79,7 @@ func init() {
fs.Errorf(nil, "Failed to read config: %v", err)
}
updateTokenURL(oauthConfig, optc.Hostname)
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("form not found in response")
}
@ -397,15 +399,14 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
return nil, fmt.Errorf("close file: %w", err)
}
writer := &writerAt{
ctx: ctx,
client: client,
fs: f,
size: size,
remote: remote,
fd: openResult.FileDescriptor,
fileID: openResult.Fileid,
}


@ -18,14 +18,21 @@ import (
// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
ctx context.Context
client *rest.Client
fs *Fs
size int64
remote string
fd int64
fileID int64
}
// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
// close fd
if _, err := c.fileClose(c.ctx); err != nil {
return fmt.Errorf("close fd: %w", err)
}
// Avoiding race conditions: Depending on the tcp connection, there might be
// caching issues when checking the size immediately after write.
// Hence we try avoiding them by checking the resulting size on a different connection.
@ -65,18 +72,8 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
inSHA1Bytes := sha1.Sum(buffer)
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
client, err := c.fs.newSingleConnClient(c.ctx)
if err != nil {
return 0, fmt.Errorf("create client: %w", err)
}
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
if err != nil {
return 0, fmt.Errorf("open file: %w", err)
}
// get target hash
outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
if err != nil {
return 0, err
}
@ -92,15 +89,10 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
}
// upload buffer with offset if necessary
if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
return 0, err
}
// close fd
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
return contentLength, fmt.Errorf("close fd: %w", err)
}
return contentLength, nil
}
@ -133,40 +125,11 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
return result, nil
}
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_open",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
opts.Parameters.Set("flags", "0x0002") // O_WRITE
result := &api.FileOpenResponse{}
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("open new file descriptor: %w", err)
}
return result, nil
}
// Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func fileChecksum(
func (c *writerAt) fileChecksum(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd, offset, count int64,
offset, count int64,
) (*api.FileChecksumResponse, error) {
opts := rest.Opts{
Method: "PUT",
@ -177,29 +140,26 @@ func fileChecksum(
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
result := &api.FileChecksumResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
}
return result, nil
}
// Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func filePWrite(
func (c *writerAt) filePWrite(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
offset int64,
buf []byte,
) (*api.FilePWriteResponse, error) {
@ -216,29 +176,24 @@ func filePWrite(
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
result := &api.FilePWriteResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
}
return result, nil
}
// Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func fileClose(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
) (*api.FileCloseResponse, error) {
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_close",
@ -246,11 +201,11 @@ func fileClose(
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Close: true,
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
result := &api.FileCloseResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})

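All of the pcloud fd-based calls above share one request shape: a PUT carrying fd plus operation-specific query parameters on a kept-alive connection. A rough sketch of assembling those parameters, using the names from the hunks above:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// fdParams assembles the query parameters shared by the fd-based pcloud
// calls above (file_checksum, file_pwrite, file_close all start from fd).
func fdParams(fd, offset, count int64) url.Values {
	v := url.Values{}
	v.Set("fd", strconv.FormatInt(fd, 10))
	v.Set("offset", strconv.FormatInt(offset, 10))
	v.Set("count", strconv.FormatInt(count, 10))
	return v
}

func main() {
	fmt.Println("PUT /file_checksum?" + fdParams(3, 0, 4096).Encode())
}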

@ -82,11 +82,13 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: clientID,
RedirectURL: oauthutil.RedirectURL,
}
@ -213,11 +215,6 @@ Fill in for rclone to use a non root folder as its starting point.
Default: false,
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "no_media_link",
Default: false,
Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
Advanced: true,
}, {
Name: "hash_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
@ -291,7 +288,6 @@ type Options struct {
RootFolderID string `config:"root_folder_id"`
UseTrash bool `config:"use_trash"`
TrashedOnly bool `config:"trashed_only"`
NoMediaLink bool `config:"no_media_link"`
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
@ -565,7 +561,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
if strings.Contains(err.Error(), "invalid_grant") {
return f, f.reAuthorize(ctx)
}
return nil, err
}
return f, nil
@ -1581,14 +1576,15 @@ func (o *Object) setMetaData(info *api.File) (err error) {
o.md5sum = info.Md5Checksum
if info.Links.ApplicationOctetStream != nil {
o.link = info.Links.ApplicationOctetStream
if !o.fs.opt.NoMediaLink {
if fid := parseFileID(o.link.URL); fid != "" {
for _, media := range info.Medias {
if media.Link != nil && parseFileID(media.Link.URL) == fid {
fs.Debugf(o, "Using a media link")
o.link = media.Link
break
}
if fid := parseFileID(o.link.URL); fid != "" {
for mid, media := range info.Medias {
if media.Link == nil {
continue
}
if mfid := parseFileID(media.Link.URL); fid == mfid {
fs.Debugf(o, "Using a media link from Medias[%d]", mid)
o.link = media.Link
break
}
}
}

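The media-link selection above hinges on parseFileID, whose implementation is not part of this diff. A hedged sketch of the selection loop with a purely hypothetical parseFileID that reads a fid query parameter:

package main

import (
	"fmt"
	"net/url"
)

type link struct{ URL string }

// parseFileID is a stand-in for the backend helper of the same name, whose
// implementation is not shown in this diff; here it just reads a fid query
// parameter from the URL.
func parseFileID(rawURL string) string {
	u, err := url.Parse(rawURL)
	if err != nil {
		return ""
	}
	return u.Query().Get("fid")
}

func main() {
	octet := link{URL: "https://dl.example.com/f?fid=abc123"}
	medias := []link{
		{URL: "https://media.example.com/f?fid=zzz999"},
		{URL: "https://media.example.com/f?fid=abc123"},
	}
	// Prefer a media link whose file ID matches the original download link.
	chosen := octet
	if fid := parseFileID(octet.URL); fid != "" {
		for _, m := range medias {
			if parseFileID(m.URL) == fid {
				chosen = m
				break
			}
		}
	}
	fmt.Println(chosen.URL)
}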

@ -43,6 +43,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@ -58,10 +59,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,


@ -572,17 +572,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
// We have successfully copied the file to random name
// Check to see if file already exists first and delete it if so
existingObj, err := f.NewObject(ctx, remote)
if err == nil {
err = existingObj.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
}
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))


@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
/*
@ -40,10 +41,12 @@ const (
var (
// Description of how to auth for this app
putioConfig = &oauthutil.Config{
Scopes: []string{},
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
putioConfig = &oauth2.Config{
Scopes: []string{},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,


@ -136,9 +136,6 @@ var providerOption = fs.Option{
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Outscale",
Help: "OUTSCALE Object Storage (OOS)",
}, {
Value: "Petabox",
Help: "Petabox Object Storage",
@ -154,9 +151,6 @@ var providerOption = fs.Option{
}, {
Value: "SeaweedFS",
Help: "SeaweedFS S3",
}, {
Value: "Selectel",
Help: "Selectel Object Storage",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@ -494,26 +488,6 @@ func init() {
Value: "eu-south-2",
Help: "Logrono, Spain",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "Outscale",
Examples: []fs.OptionExample{{
Value: "eu-west-2",
Help: "Paris, France",
}, {
Value: "us-east-2",
Help: "New Jersey, USA",
}, {
Value: "us-west-1",
Help: "California, USA",
}, {
Value: "cloudgouv-eu-west-1",
Help: "SecNumCloud, Paris, France",
}, {
Value: "ap-northeast-1",
Help: "Tokyo, Japan",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
@ -554,19 +528,10 @@ func init() {
Value: "tw-001",
Help: "Asia (Taiwan)",
}},
}, {
// See endpoints for object storage regions: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
Name: "region",
Help: "Region where your data stored.\n",
Provider: "Selectel",
Examples: []fs.OptionExample{{
Value: "ru-1",
Help: "St. Petersburg",
}},
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@ -1331,19 +1296,10 @@ func init() {
Value: "s3-ap-northeast-1.qiniucs.com",
Help: "Northeast Asia Endpoint 1",
}},
}, {
// Selectel endpoints: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
Name: "endpoint",
Help: "Endpoint for Selectel Object Storage.",
Provider: "Selectel",
Examples: []fs.OptionExample{{
Value: "s3.ru-1.storage.selcloud.ru",
Help: "Saint Petersburg",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@ -1388,26 +1344,6 @@ func init() {
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
Provider: "LyveCloud",
}, {
Value: "oos.eu-west-2.outscale.com",
Help: "Outscale EU West 2 (Paris)",
Provider: "Outscale",
}, {
Value: "oos.us-east-2.outscale.com",
Help: "Outscale US east 2 (New Jersey)",
Provider: "Outscale",
}, {
Value: "oos.us-west-1.outscale.com",
Help: "Outscale EU West 1 (California)",
Provider: "Outscale",
}, {
Value: "oos.cloudgouv-eu-west-1.outscale.com",
Help: "Outscale SecNumCloud (Paris)",
Provider: "Outscale",
}, {
Value: "oos.ap-northeast-1.outscale.com",
Help: "Outscale AP Northeast 1 (Japan)",
Provider: "Outscale",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East 1 (N. Virginia)",
@ -1444,10 +1380,6 @@ func init() {
Value: "s3.eu-west-2.wasabisys.com",
Help: "Wasabi EU West 2 (Paris)",
Provider: "Wasabi",
}, {
Value: "s3.eu-south-1.wasabisys.com",
Help: "Wasabi EU South 1 (Milan)",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",
@ -1866,7 +1798,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@ -1881,7 +1813,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Selectel,Synology,Cloudflare",
Provider: "!Storj,Synology,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@ -3427,8 +3359,6 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
useMultipartEtag = false // untested
useAlreadyExists = false // untested
case "Outscale":
virtualHostStyle = false
case "RackCorp":
// No quirks
useMultipartEtag = false // untested
@ -3451,8 +3381,6 @@ func setQuirks(opt *Options) {
}
urlEncodeListings = true
useAlreadyExists = true
case "Selectel":
urlEncodeListings = false
case "SeaweedFS":
listObjectsV2 = false // untested
virtualHostStyle = false
@ -3470,10 +3398,6 @@ func setQuirks(opt *Options) {
opt.ChunkSize = 64 * fs.Mebi
}
useAlreadyExists = false // returns BucketAlreadyExists
// Storj doesn't support multi-part server side copy:
// https://github.com/storj/roadmap/issues/40
// So make cutoff very large which it does support
opt.CopyCutoff = math.MaxInt64
case "Synology":
useMultipartEtag = false
useAlreadyExists = false // untested
@ -5860,31 +5784,12 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
ContentEncoding: header("Content-Encoding"),
ContentLanguage: header("Content-Language"),
ContentType: header("Content-Type"),
StorageClass: types.StorageClass(deref(header("X-Amz-Storage-Class"))),
StorageClass: types.StorageClass(*header("X-Amz-Storage-Class")),
}
o.setMetaData(&head)
return resp.Body, err
}
// middleware to stop the SDK adding `Accept-Encoding: identity`
func removeDisableGzip() func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
return err
}
}
// middleware to set Accept-Encoding to how we want it
//
// This make sure we download compressed files as-is from all platforms
func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
APIOptions = append(APIOptions, removeDisableGzip())
if f.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
return APIOptions
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
bucket, bucketPath := o.split()
@ -5918,8 +5823,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var APIOptions []func(*middleware.Stack) error
// Set the SDK to always download compressed files as-is
APIOptions = append(APIOptions, o.fs.acceptEncoding()...)
// Override the automatic decompression in the transport to
// download compressed files as-is
if o.fs.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
for _, option := range options {
switch option.(type) {
@ -6070,8 +5978,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: ui.req.Bucket,
key: ui.req.Key,
bucket: mOut.Bucket,
key: mOut.Key,
uploadID: mOut.UploadId,
multiPartUploadInput: &mReq,
completedParts: make([]types.CompletedPart, 0),

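The Accept-Encoding handling above composes per-call middleware options. A minimal sketch of wiring such options into a single aws-sdk-go-v2 request; the bucket, key, and region are placeholders, and the call needs real credentials to succeed:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// removeDisableGzip drops the SDK middleware that pins
// Accept-Encoding to identity, mirroring the helper in the diff.
func removeDisableGzip() func(*middleware.Stack) error {
	return func(stack *middleware.Stack) error {
		_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
		return err
	}
}

func main() {
	client := s3.New(s3.Options{Region: "us-east-1"})
	input := &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	}
	// Apply the header tweaks to this one call only.
	_, _ = client.GetObject(context.Background(), input, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions,
			removeDisableGzip(),
			smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"),
		)
	})
}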

@ -23,20 +23,14 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := &fstests.Opt{
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD"},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
}
// Test wider range of tiers on AWS
if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
}
fstests.Run(t, opt)
})
}
func TestIntegration2(t *testing.T) {

View file

@ -99,11 +99,6 @@ Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
Sensitive: true,
}, {
Name: "pubkey",
Help: `SSH public certificate for public certificate based authentication.
Set this if you have a signed certificate you want to use for authentication.
If specified will override pubkey_file.`,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.
@ -516,7 +511,6 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKey string `config:"pubkey"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
@ -1003,21 +997,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// If a public key has been specified then use that
if pubkeyFile != "" || opt.PubKey != "" {
pubKeyRaw := []byte(opt.PubKey)
// Use this error if public key is provided inline and is not a certificate
// if public key file is provided instead, use the err in the if block
notACertError := errors.New("public key provided is not a certificate: " + opt.PubKey)
if opt.PubKey == "" {
notACertError = errors.New("public key file is not a certificate file: " + pubkeyFile)
err := error(nil)
pubKeyRaw, err = os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
if pubkeyFile != "" {
certfile, err := os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
return nil, fmt.Errorf("unable to parse cert file: %w", err)
}
@ -1031,7 +1017,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// knows everything it needs.
cert, ok := pk.(*ssh.Certificate)
if !ok {
return nil, notACertError
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
@ -2101,10 +2087,10 @@ func (file *objectReader) Read(p []byte) (n int, err error) {
// Close a reader of a remote sftp file
func (file *objectReader) Close() (err error) {
// Close the pipeReader so writes to the pipeWriter fail
_ = file.pipeReader.Close()
// Close the sftpFile - this will likely cause the WriteTo to error
err = file.sftpFile.Close()
// Close the pipeReader so writes to the pipeWriter fail
_ = file.pipeReader.Close()
// Wait for the background process to finish
<-file.done
// Show connection no longer in use
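
The ordering of these closes is the subtle part. A minimal sketch with io.Pipe (standing in for rclone's reader plumbing, not its actual code) of why closing the read side makes a blocked background writer return instead of deadlocking:

```go
package main

import (
	"fmt"
	"io"
)

func main() {
	pr, pw := io.Pipe()
	done := make(chan struct{})

	// Background writer, standing in for the sftp WriteTo goroutine.
	go func() {
		defer close(done)
		_, err := pw.Write([]byte("payload")) // blocks until read or close
		fmt.Println("write returned:", err)   // io.ErrClosedPipe
	}()

	// Closing the read side first makes any pending or future writes fail
	// instead of blocking forever...
	_ = pr.Close()
	<-done // ...so waiting for the goroutine cannot deadlock.
}
```
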

View file

@ -97,6 +97,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@ -114,11 +115,13 @@ const (
)
// Generate a new oauth2 config which we will update when we know the TokenURL
func newOauthConfig(tokenURL string) *oauthutil.Config {
return &oauthutil.Config{
Scopes: nil,
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
func newOauthConfig(tokenURL string) *oauth2.Config {
return &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectPublicSecureURL,
@ -133,7 +136,7 @@ func init() {
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("endpoint not found in response")
}
@ -144,7 +147,7 @@ func init() {
}
endpoint := "https://" + subdomain + "." + apicp
m.Set("endpoint", endpoint)
oauthConfig.TokenURL = endpoint + tokenPath
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
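
For reference, a hedged sketch of the stock `oauth2.Config` shape used on one side of this hunk, with the token URL filled in once the subdomain is known; the client credentials and token path below are placeholders, not ShareFile's real values:

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL: "https://secure.sharefile.com/oauth/authorize",
			// TokenURL depends on the user's subdomain, so it is left
			// empty here and filled in after discovery.
		},
	}

	// Later, once the endpoint has been discovered:
	endpoint := "https://subdomain.sharefile.example" // hypothetical
	conf.Endpoint.TokenURL = endpoint + "/oauth/token" // hypothetical path

	fmt.Println(conf.AuthCodeURL("state"))
}
```
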

View file

@ -35,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@ -868,13 +867,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@ -891,13 +890,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, err
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Copy the object
opts := rest.Opts{
Method: "POST",

View file

@ -22,13 +22,13 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
// oAuth
@ -46,9 +46,11 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@ -711,7 +713,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@ -719,21 +721,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
}
dstPath := f.filePath(remote)
err = f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(ctx, dstPath)
if err != nil {
return nil, err
}
// Find and remove existing object
//
// Note that the overwrite flag doesn't seem to work for server side copy
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}

View file

@ -47,7 +47,7 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
oauthConfig = &oauth2.Config{
Scopes: []string{
"aaaserver.profile.read",
"WorkDrive.team.READ",
@ -55,10 +55,11 @@ var (
"WorkDrive.files.ALL",
"ZohoFiles.files.ALL",
},
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
Endpoint: oauth2.Endpoint{
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@ -275,8 +276,8 @@ func setupRegion(m configmap.Mapper) error {
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
return nil
}

View file

@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
continue
fi
commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
if [ "$commit" == "" ]; then
commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
fi
version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
echo $backend $version
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
done

View file

@ -52,7 +52,6 @@ docs = [
"hidrive.md",
"http.md",
"imagekit.md",
"iclouddrive.md",
"internetarchive.md",
"jottacloud.md",
"koofr.md",

View file

@ -7,18 +7,15 @@ Run with no arguments to test all backends or supply a list of
backends to test.
"""
import os
import re
import sys
import subprocess
all_backends = "backend/all/all.go"
# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]
# disable CGO as that makes a lot of difference to binary size
os.environ["CGO_ENABLED"]="0"
import os
import re
import sys
import subprocess
match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')
@ -46,9 +43,6 @@ def write_all(orig_all, backend):
# Comment out line matching backend
if match and match.group(1) == backend:
line = "// " + line
# s3 and pikpak depend on each other
if backend == "s3" and "pikpak" in line:
line = "// " + line
fd.write(line+"\n")
def compile():

View file

@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
exit 1
fi
VERSION="$1"
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
cat > "/tmp/${VERSION}-release-notes" <<EOF
This is the ${VERSION} release of rclone.

View file

@ -5,13 +5,20 @@ import (
"bytes"
"log"
"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)
// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Out
logrusSave := logrus.StandardLogger().Writer()
defer func() {
err := logrusSave.Close()
if err != nil {
fs.Errorf(nil, "error closing logrusSave: %v", err)
}
}()
buf := &bytes.Buffer{}
log.SetOutput(buf)
logrus.SetOutput(buf)

View file

@ -66,8 +66,7 @@ func quotePath(path string) string {
return escapePath(path, true)
}
// Colors controls whether terminal colors are enabled
var Colors bool
var Colors bool // Colors controls whether terminal colors are enabled
// Color handles terminal colors for bisync
func Color(style string, s string) string {

View file

@ -83,13 +83,12 @@ func ShowVersion() {
// It returns a string with the file name if points to a file
// otherwise "".
func NewFsFile(remote string) (fs.Fs, string) {
ctx := context.Background()
_, fsPath, err := fspath.SplitFs(remote)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
}
f, err := cache.Get(ctx, remote)
f, err := cache.Get(context.Background(), remote)
switch err {
case fs.ErrorIsFile:
cache.Pin(f) // pin indefinitely since it was on the CLI
@ -98,7 +97,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
cache.Pin(f) // pin indefinitely since it was on the CLI
return f, ""
default:
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
}
return nil, ""
@ -109,19 +108,18 @@ func NewFsFile(remote string) (fs.Fs, string) {
// This works the same as NewFsFile however it adds filters to the Fs
// to limit it to a single file if the remote pointed to a file.
func newFsFileAddFilter(remote string) (fs.Fs, string) {
ctx := context.Background()
fi := filter.GetConfig(ctx)
fi := filter.GetConfig(context.Background())
f, fileName := NewFsFile(remote)
if fileName != "" {
if !fi.InActive() {
err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, err.Error())
}
// Limit transfers to this file
err := fi.AddFile(fileName)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err)
}
}
@ -141,10 +139,9 @@ func NewFsSrc(args []string) fs.Fs {
//
// This must point to a directory
func newFsDir(remote string) fs.Fs {
ctx := context.Background()
f, err := cache.Get(ctx, remote)
f, err := cache.Get(context.Background(), remote)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
}
cache.Pin(f) // pin indefinitely since it was on the CLI
@ -178,7 +175,6 @@ func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs)
// NewFsSrcDstFiles creates a new src and dst fs from the arguments
// If src is a file then srcFileName and dstFileName will be non-empty
func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
ctx := context.Background()
fsrc, srcFileName = newFsFileAddFilter(args[0])
// If copying a file...
dstRemote := args[1]
@ -197,14 +193,14 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
fs.Fatalf(nil, "%q is a directory", args[1])
}
}
fdst, err := cache.Get(ctx, dstRemote)
fdst, err := cache.Get(context.Background(), dstRemote)
switch err {
case fs.ErrorIsFile:
_ = fs.CountError(ctx, err)
_ = fs.CountError(err)
fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file")
case nil:
default:
_ = fs.CountError(ctx, err)
_ = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err)
}
cache.Pin(fdst) // pin indefinitely since it was on the CLI
@ -238,8 +234,7 @@ func ShowStats() bool {
// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci := fs.GetConfig(context.Background())
var cmdErr error
stopStats := func() {}
if !showStats && ShowStats() {
@ -253,7 +248,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
SigInfoHandler()
for try := 1; try <= ci.Retries; try++ {
cmdErr = f()
cmdErr = fs.CountError(ctx, cmdErr)
cmdErr = fs.CountError(cmdErr)
lastErr := accounting.GlobalStats().GetLastError()
if cmdErr == nil {
cmdErr = lastErr
@ -441,19 +436,19 @@ func initConfig() {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
err = pprof.StartCPUProfile(f)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
atexit.Register(func() {
pprof.StopCPUProfile()
err := f.Close()
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
})
@ -465,17 +460,17 @@ func initConfig() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
err = pprof.WriteHeapProfile(f)
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
err = f.Close()
if err != nil {
err = fs.CountError(ctx, err)
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
}
})
@ -483,8 +478,7 @@ func initConfig() {
}
func resolveExitCode(err error) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci := fs.GetConfig(context.Background())
atexit.Run()
if err == nil {
if ci.ErrorOnNoTransfer {

View file

@ -121,6 +121,19 @@ func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, err
return leaf, dir, errc
}
// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*vfs.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}
// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
@ -141,9 +154,15 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = getMode(node)
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
@ -490,15 +509,14 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
return translateError(fsys.VFS.Symlink(target, newpath))
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath)
linkPath, err := fsys.VFS.Readlink(path)
return translateError(err), linkPath
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
@ -562,7 +580,7 @@ func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string
return errc, ""
}
normalisedPath = node.Path()
if !strings.HasPrefix(normalisedPath, "/") {
if !strings.HasPrefix("/", normalisedPath) {
normalisedPath = "/" + normalisedPath
}
return 0, normalisedPath
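
The argument order is the whole difference between these two hunk lines. A tiny self-contained illustration of `strings.HasPrefix` semantics, showing how the swapped form misfires on an already-rooted path:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	normalisedPath := "/already/rooted"

	// Intended check: does the path start with "/"?
	fmt.Println(strings.HasPrefix(normalisedPath, "/")) // true

	// Swapped arguments ask whether "/" starts with the path, which is
	// false for anything longer than "/", so the guard would wrongly
	// prepend another "/" to a path that is already rooted.
	fmt.Println(strings.HasPrefix("/", normalisedPath)) // false
}
```
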
@ -597,8 +615,6 @@ func translateError(err error) (errc int) {
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
case vfs.ELOOP:
return -fuse.ELOOP
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
@ -630,22 +646,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
return outFlags
}
// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
return uint32(Mode)
}
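
As a quick illustration of why the type bits have to be re-added by hand: `Perm()` keeps only the permission bits and strips `os.ModeDir`, `os.ModeSymlink`, and `os.ModeNamedPipe`, which a standalone sketch makes visible:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Lstat (not Stat) so a symlink would be reported as a symlink.
	fi, err := os.Lstat(".")
	if err != nil {
		fmt.Println(err)
		return
	}
	m := fi.Mode()
	// Perm() drops the type bits, so a FUSE layer must map them back to
	// the matching S_IF* class itself.
	fmt.Printf("perm=%o dir=%v symlink=%v pipe=%v\n",
		m.Perm(), m&os.ModeDir != 0, m&os.ModeSymlink != 0, m&os.ModeNamedPipe != 0)
}
```
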
// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)

View file

@ -10,6 +10,7 @@ import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/rclone/rclone/cmd/mountlib"
@ -34,6 +35,19 @@ func init() {
buildinfo.Tags = append(buildinfo.Tags, "cmount")
}
// Find the option string in the current options
func findOption(name string, options []string) (found bool) {
for _, option := range options {
if option == "-o" {
continue
}
if strings.Contains(option, name) {
return true
}
}
return false
}
// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
@ -79,9 +93,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
if VFS.Opt.ReadOnly {
options = append(options, "-o", "ro")
}
//if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
//}
if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
if runtime.GOOS == "darwin" {
if opt.VolumeName != "" {
options = append(options, "-o", "volname="+opt.VolumeName)
@ -97,7 +111,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
for _, option := range opt.ExtraOptions {
options = append(options, "-o", option)
}
options = append(options, opt.ExtraFlags...)
for _, option := range opt.ExtraFlags {
options = append(options, option)
}
return options
}

View file

@ -7,7 +7,6 @@ import (
"fmt"
"io"
"os"
"path"
"syscall"
"time"
@ -34,7 +33,7 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = d.Mode()
a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
@ -141,13 +140,11 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
// translate the fuse flags to os flags
osFlags := int(req.Flags) | os.O_CREATE
file, err := d.Dir.Create(req.Name, osFlags)
file, err := d.Dir.Create(req.Name, int(req.Flags))
if err != nil {
return nil, nil, translateError(err)
}
fh, err := file.Open(osFlags)
fh, err := file.Open(int(req.Flags) | os.O_CREATE)
if err != nil {
return nil, nil, translateError(err)
}
@ -203,6 +200,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
if !ok {
return fmt.Errorf("unknown Dir type %T", newDir)
}
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
if err != nil {
return translateError(err)
@ -241,24 +239,6 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
return nil, syscall.ENOSYS
}
var _ fusefs.NodeSymlinker = (*Dir)(nil)
// Symlink create a symbolic link.
func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)
newName := path.Join(d.Path(), req.NewName)
target := req.Target
n, err := d.VFS().CreateSymlink(target, newName)
if err != nil {
return nil, err
}
node = &File{n.(*vfs.File), d.fsys}
return node, nil
}
// Check interface satisfied
var _ fusefs.NodeMknoder = (*Dir)(nil)

View file

@ -32,7 +32,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
Blocks := (Size + 511) / 512
a.Gid = f.VFS().Opt.GID
a.Uid = f.VFS().Opt.UID
a.Mode = f.File.Mode() &^ os.ModeAppend
a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
a.Size = Size
a.Atime = modTime
a.Mtime = modTime
@ -129,11 +129,3 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
}
var _ fusefs.NodeRemovexattrer = (*File)(nil)
var _ fusefs.NodeReadlinker = (*File)(nil)
// Readlink read symbolic link target.
func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err)
return f.VFS().Readlink(f.Path())
}

View file

@ -100,8 +100,6 @@ func translateError(err error) error {
return syscall.ENOSYS
case vfs.EINVAL:
return fuse.Errno(syscall.EINVAL)
case vfs.ELOOP:
return fuse.Errno(syscall.ELOOP)
}
fs.Errorf(nil, "IO error: %v", err)
return err

View file

@ -51,14 +51,9 @@ func (f *FS) SetDebug(debug bool) {
// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
@ -133,8 +128,6 @@ func translateError(err error) syscall.Errno {
return syscall.ENOSYS
case vfs.EINVAL:
return syscall.EINVAL
case vfs.ELOOP:
return syscall.ELOOP
}
fs.Errorf(nil, "IO error: %v", err)
return syscall.EIO

View file

@ -227,7 +227,7 @@ type dirStream struct {
// HasNext indicates if there are further entries. HasNext
// might be called on already closed streams.
func (ds *dirStream) HasNext() bool {
return ds.i < len(ds.nodes)+2
return ds.i < len(ds.nodes)
}
// Next retrieves the next entry. It is only called if HasNext
@ -235,22 +235,7 @@ func (ds *dirStream) HasNext() bool {
// indicate I/O errors
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
if ds.i == 0 {
ds.i++
return fuse.DirEntry{
Mode: fuse.S_IFDIR,
Name: ".",
Ino: 0, // FIXME
}, 0
} else if ds.i == 1 {
ds.i++
return fuse.DirEntry{
Mode: fuse.S_IFDIR,
Name: "..",
Ino: 0, // FIXME
}, 0
}
fi := ds.nodes[ds.i-2]
fi := ds.nodes[ds.i]
de = fuse.DirEntry{
// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
// are considered.
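
A minimal stand-in (plain strings, no fuse types) showing the index bookkeeping when `.` and `..` are synthesized ahead of the real entries, as one side of this hunk does:

```go
package main

import "fmt"

// dirStream mimics the shape above: it yields "." and ".." first, so
// indexes into nodes are shifted by two.
type dirStream struct {
	nodes []string
	i     int
}

func (ds *dirStream) HasNext() bool { return ds.i < len(ds.nodes)+2 }

func (ds *dirStream) Next() string {
	defer func() { ds.i++ }()
	switch ds.i {
	case 0:
		return "."
	case 1:
		return ".."
	}
	return ds.nodes[ds.i-2] // skip past the two synthetic entries
}

func main() {
	ds := &dirStream{nodes: []string{"a", "b"}}
	for ds.HasNext() {
		fmt.Println(ds.Next()) // . .. a b
	}
}
```
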
@ -458,31 +443,3 @@ func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn
}
var _ fusefs.NodeListxattrer = (*Node)(nil)
var _ fusefs.NodeReadlinker = (*Node)(nil)
// Readlink read symbolic link target.
func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
path := n.node.Path()
s, serr := n.node.VFS().Readlink(path)
return []byte(s), translateError(serr)
}
var _ fusefs.NodeSymlinker = (*Node)(nil)
// Symlink create symbolic link.
func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
fullPath := path.Join(n.node.Path(), name)
vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
if serr != nil {
return nil, translateError(serr)
}
n.fsys.setEntryOut(vfsNode, out)
newNode := newNode(n.fsys, vfsNode)
newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
return newInode, 0
}

View file

@ -373,9 +373,6 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
if err != nil {
if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
return nil, fmt.Errorf("mounting is not supported when running from snap")
}
return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
}
m.MountedOn = time.Now()

View file

@ -190,17 +190,16 @@ func (s *server) ModelNumber() string {
// Renders the root device descriptor.
func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
tmpl, err := data.GetTemplate()
if err != nil {
serveError(ctx, s, w, "Failed to load root descriptor template", err)
serveError(s, w, "Failed to load root descriptor template", err)
return
}
buffer := new(bytes.Buffer)
err = tmpl.Execute(buffer, s)
if err != nil {
serveError(ctx, s, w, "Failed to render root descriptor XML", err)
serveError(s, w, "Failed to render root descriptor XML", err)
return
}
@ -216,16 +215,15 @@ func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {
// Handle a service control HTTP request.
func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
soapActionString := r.Header.Get("SOAPACTION")
soapAction, err := upnp.ParseActionHTTPHeader(soapActionString)
if err != nil {
serveError(ctx, s, w, "Could not parse SOAPACTION header", err)
serveError(s, w, "Could not parse SOAPACTION header", err)
return
}
var env soap.Envelope
if err := xml.NewDecoder(r.Body).Decode(&env); err != nil {
serveError(ctx, s, w, "Could not parse SOAP request body", err)
serveError(s, w, "Could not parse SOAP request body", err)
return
}
@ -259,7 +257,6 @@ func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte,
// Serves actual resources (media files).
func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
remotePath := r.URL.Path
node, err := s.vfs.Stat(r.URL.Path)
if err != nil {
@ -280,7 +277,7 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
file := node.(*vfs.File)
in, err := file.Open(os.O_RDONLY)
if err != nil {
serveError(ctx, node, w, "Could not open resource", err)
serveError(node, w, "Could not open resource", err)
return
}
defer fs.CheckClose(in, &err)

View file

@ -1,7 +1,6 @@
package dlna
import (
"context"
"crypto/md5"
"encoding/xml"
"fmt"
@ -108,7 +107,7 @@ func (lrw *loggingResponseWriter) logRequest(code int, err interface{}) {
err = ""
}
fs.LogLevelPrintf(level, lrw.request.URL, "%s %s %d %s %s",
fs.LogPrintf(level, lrw.request.URL, "%s %s %d %s %s",
lrw.request.RemoteAddr, lrw.request.Method, code,
lrw.request.Header.Get("SOAPACTION"), err)
}
@ -143,10 +142,9 @@ func logging(next http.Handler) http.Handler {
// Error recovery and general request logging are left to logging().
func traceLogging(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
dump, err := httputil.DumpRequest(r, true)
if err != nil {
serveError(ctx, nil, w, "error dumping request", err)
serveError(nil, w, "error dumping request", err)
return
}
fs.Debugf(nil, "%s", dump)
@ -184,8 +182,8 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
}
// serveError returns an http.StatusInternalServerError and logs the error
func serveError(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) {
err = fs.CountError(ctx, err)
func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
err = fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}

View file

@ -186,7 +186,6 @@ func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
// serveDir serves a directory index at dirRemote
func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
ctx := r.Context()
VFS, err := s.getVFS(r.Context())
if err != nil {
http.Error(w, "Root directory not found", http.StatusNotFound)
@ -199,7 +198,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
http.Error(w, "Directory not found", http.StatusNotFound)
return
} else if err != nil {
serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
serve.Error(dirRemote, w, "Failed to list directory", err)
return
}
if !node.IsDir() {
@ -209,7 +208,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
serve.Error(dirRemote, w, "Failed to list directory", err)
return
}
@ -235,7 +234,6 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
// serveFile serves a file object at remote
func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
ctx := r.Context()
VFS, err := s.getVFS(r.Context())
if err != nil {
http.Error(w, "File not found", http.StatusNotFound)
@ -249,7 +247,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
http.Error(w, "File not found", http.StatusNotFound)
return
} else if err != nil {
serve.Error(ctx, remote, w, "Failed to find file", err)
serve.Error(remote, w, "Failed to find file", err)
return
}
if !node.IsFile() {
@ -289,7 +287,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
// open the object
in, err := file.Open(os.O_RDONLY)
if err != nil {
serve.Error(ctx, remote, w, "Failed to open file", err)
serve.Error(remote, w, "Failed to open file", err)
return
}
defer func() {

View file

@ -142,16 +142,16 @@ func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
return fi, nil
}
// Symlink creates a link pointing to target
// Symlink is not supported over NFS
func (f *FS) Symlink(target, link string) (err error) {
defer log.Trace(target, "link=%q", link)("err=%v", &err)
return f.vfs.Symlink(target, link)
return os.ErrInvalid
}
// Readlink reads the contents of link
// Readlink is not supported
func (f *FS) Readlink(link string) (result string, err error) {
defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
return f.vfs.Readlink(link)
return "", os.ErrInvalid
}
// Chmod changes the file modes

View file

@ -65,7 +65,7 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
if s.proxy == nil {
return s.vfs
}
if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
fs.Infof(what, "SSH Permissions Extensions not found")
return nil
}
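
The `&&`/`||` difference in this guard decides whether a nil pointer gets dereferenced. A self-contained illustration of the short-circuit behaviour:

```go
package main

import "fmt"

type Permissions struct{ Extensions map[string]string }
type Conn struct{ Permissions *Permissions }

func main() {
	c := &Conn{} // Permissions is nil

	// Safe: || short-circuits, so when Permissions is nil the right-hand
	// operand is never evaluated and nothing is dereferenced.
	if c.Permissions == nil || c.Permissions.Extensions == nil {
		fmt.Println("no permissions")
	}

	// Buggy form: with && a nil Permissions makes the left operand true,
	// so the right operand is still evaluated and dereferences nil.
	defer func() { fmt.Println("recovered from:", recover()) }()
	if c.Permissions == nil && c.Permissions.Extensions == nil {
		fmt.Println("never reached")
	}
}
```
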
@ -143,13 +143,8 @@ func (s *server) serve() (err error) {
authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
// If user set the flag away from the default then report an error
if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
if err != nil {
return err
}
if len(authorizedKeysMap) == 0 {
return fmt.Errorf("failed to parse authorized keys")
}
if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
return err
}
fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
}
@ -354,10 +349,11 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
authorizedKeysMap = make(map[string]struct{})
for len(authorizedKeysBytes) > 0 {
pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
if err == nil {
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
if err != nil {
return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
}
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
}
return authorizedKeysMap, nil
}
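
For reference, the standard parse loop over an authorized_keys buffer as a standalone sketch; the key material is an elided placeholder, so substitute real public keys to run it:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// authorized_keys-style content, one key per line (placeholders).
	data := []byte("ssh-ed25519 AAAA... user@host\nssh-rsa AAAA... other@host\n")

	keys := make(map[string]struct{})
	for len(data) > 0 {
		pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(data)
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		keys[string(pubKey.Marshal())] = struct{}{}
		data = rest // continue with the remainder of the buffer
	}
	fmt.Println("loaded", len(keys), "keys")
}
```
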

View file

@ -349,7 +349,6 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
// serveDir serves a directory index at dirRemote
// This is similar to serveDir in serve http.
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
ctx := r.Context()
VFS, err := w.getVFS(r.Context())
if err != nil {
http.Error(rw, "Root directory not found", http.StatusNotFound)
@ -362,7 +361,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
http.Error(rw, "Directory not found", http.StatusNotFound)
return
} else if err != nil {
serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
serve.Error(dirRemote, rw, "Failed to list directory", err)
return
}
if !node.IsDir() {
@ -373,7 +372,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
dirEntries, err := dir.ReadDirAll()
if err != nil {
serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
serve.Error(dirRemote, rw, "Failed to list directory", err)
return
}

View file

@ -30,7 +30,6 @@ var (
maxFileSize = fs.SizeSuffix(100)
minFileNameLength = 4
maxFileNameLength = 12
flat = false
seed = int64(1)
zero = false
sparse = false
@ -56,7 +55,6 @@ func init() {
flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")
test.Command.AddCommand(makefileCmd)
makefileFlags := makefileCmd.Flags()
@ -83,9 +81,6 @@ var makefilesCmd = &cobra.Command{
commonInit()
outputDirectory := args[0]
directoriesToCreate = numberOfFiles / averageFilesPerDirectory
if flat {
directoriesToCreate = 0
}
averageSize := (minFileSize + maxFileSize) / 2
start := time.Now()
fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)

View file

@ -15,7 +15,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestEnvironmentVariables demonstrates and verifies the test functions for end-to-end testing of rclone
// TestCmdTest demonstrates and verifies the test functions for end-to-end testing of rclone
func TestEnvironmentVariables(t *testing.T) {
createTestEnvironment(t)

View file

@ -66,13 +66,12 @@ so it is easy to tweak stuff.
└── static - static content for the website
├── css
│   ├── bootstrap.css
│   └── custom.css - custom css goes here
├── fontawesome
│   ├── css
│   └── webfonts
│   ├── custom.css - custom css goes here
│   └── font-awesome.css
├── img - images used
├── js
│   ├── bootstrap.js
│   ├── custom.js - custom javascript goes here
│   └── jquery.js
└── webfonts
```

View file

@ -132,7 +132,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
{{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
@ -160,7 +159,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
@ -178,7 +176,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}

View file

@ -895,27 +895,3 @@ put them back in again.` >}}
* rishi.sridhar <rishi.sridhar@zohocorp.com>
* Lawrence Murray <lawrence@indii.org>
* Leandro Piccilli <leandro.piccilli@thalesgroup.com>
* Benjamin Legrand <benjamin.legrand@seagate.com>
* Noam Ross <noam.ross@gmail.com>
* lostb1t <coding-mosses0z@icloud.com>
* Matthias Gatto <matthias.gatto@outscale.com>
* André Tran <andre.tran@outscale.com>
* Simon Bos <simon@simonbos.be>
* Alexandre Hamez <199517+ahamez@users.noreply.github.com>
* Randy Bush <randy@psg.com>
* Diego Monti <diegmonti@users.noreply.github.com>
* tgfisher <tgfisher@stanford.edu>
* Moises Lima <mozlima@gmail.com>
* Dimitar Ivanov <mimiteto@gmail.com>
* shenpengfeng <xinhangzhou@icloud.com>
* Dimitrios Slamaris <dim0x69@users.noreply.github.com>
* vintagefuture <39503528+vintagefuture@users.noreply.github.com>
* David Seifert <soap@gentoo.org>
* Michael R. Davis <mrdvt92@users.noreply.github.com>
* remygrandin <remy.gr@ndin.fr>
* Ilias Ozgur Can Leonard <iscilyas@gmail.com>
* divinity76 <divinity76@gmail.com>
* Martin Hassack <martin@redmaple.tech>
* Filipe Azevedo <pasnox@gmail.com>
* hayden.pan <hayden.pan@outlook.com>
* Yxxx <45665172+marsjane@users.noreply.github.com>

View file

@ -64,7 +64,7 @@ If not sure try Y. If Y failed, try N.
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=XXXXXXXXXXXXXXXXXXXXXX
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code

View file

@ -5,36 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.68.2 - 2024-11-15
[See commits](https://github.com/rclone/rclone/compare/v1.68.1...v1.68.2)
* Security fixes
* local backend: CVE-2024-52522: fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
* Only affects users using `--metadata` and `--links` and copying files to the local backend
* See https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
* build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (dependabot)
* This is an issue in a dependency which is used for JWT certificates
* See https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r
* Bug Fixes
* accounting: Fix wrong message on SIGUSR2 to enable/disable bwlimit (Nick Craig-Wood)
* bisync: Fix output capture restoring the wrong output for logrus (Dimitrios Slamaris)
* dlna: Fix loggingResponseWriter disregarding log level (Simon Bos)
* serve s3: Fix excess locking which was making serve s3 single threaded (Nick Craig-Wood)
* doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy Bush)
* Local
* Fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
* Fix `--copy-links` on macOS when cloning (nielash)
* Onedrive
* Fix Retry-After handling to look at 503 errors also (Nick Craig-Wood)
* Pikpak
* Fix cid/gcid calculations for fs.OverrideRemote (wiserain)
* Fix fatal crash on startup with token that can't be refreshed (Nick Craig-Wood)
* S3
* Fix crash when using `--s3-download-url` after migration to SDKv2 (Nick Craig-Wood)
* Storj provider: fix server-side copy of files bigger than 5GB (Kaloyan Raev)
* Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)
## v1.68.1 - 2024-09-24
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)

View file

@ -537,13 +537,6 @@ sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docke
```
though this is rarely needed.
If the plugin fails to work properly, and only as a last resort after you have tried diagnosing with the above methods, you can try clearing the state of the plugin. **Note that all existing rclone docker volumes will probably have to be recreated.** This might be needed because a reinstall doesn't clean up existing state files (to allow for easy restoration), as stated above.
```
docker plugin disable rclone # disable the plugin to ensure no interference
sudo rm /var/lib/docker-plugins/rclone/cache/docker-plugin.state # removing the plugin state
docker plugin enable rclone # re-enable the plugin afterward
```
## Caveats
Finally I'd like to mention a _caveat with updating volume settings_.

View file

@ -53,7 +53,6 @@ See the following for detailed instructions for
* [Hetzner Storage Box](/sftp/#hetzner-storage-box)
* [HiDrive](/hidrive/)
* [HTTP](/http/)
* [iCloud Drive](/iclouddrive/)
* [Internet Archive](/internetarchive/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
@ -1426,22 +1425,6 @@ The options mean
During rmdirs it will not remove root directory, even if it's empty.
### --links / -l
Normally rclone will ignore symlinks or junction points (which behave
like symlinks under Windows).
If you supply this flag then rclone will copy symbolic links from any
supported backend, and store them as text files, with a
`.rclonelink` suffix in the destination.
The text file will contain the target of the symbolic link.
The `--links` / `-l` flag enables this feature for all supported
backends and the VFS. There are individual flags for just enabling it
for the VFS `--vfs-links` and the local backend `--local-links` if
required.
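
As a rough sketch of the translation described above (not rclone's implementation), a symlink round-trips through a plain text file holding its target:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	dir, err := os.MkdirTemp("", "links")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	link := dir + "/file1"

	if err := os.Symlink("./file4", link); err != nil {
		log.Fatal(err)
	}

	// "Upload" direction: read the link target and store it as the
	// contents of a .rclonelink text file.
	target, err := os.Readlink(link)
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile(link+".rclonelink", []byte(target), 0o644); err != nil {
		log.Fatal(err)
	}

	// "Download" direction: recreate the symlink from the text file.
	data, err := os.ReadFile(link + ".rclonelink")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("would recreate %s -> %s\n", link, data)
}
```
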
### --log-file=FILE ###
Log all of rclone's output to FILE. This is not active by default.

View file

@ -536,7 +536,6 @@ represent the currently available conversions.
| html | text/html | An HTML Document |
| jpg | image/jpeg | A JPEG Image File |
| json | application/vnd.google-apps.script+json | JSON Text Format for Google Apps scripts |
| md | text/markdown | Markdown Text Format |
| odp | application/vnd.oasis.opendocument.presentation | Openoffice Presentation |
| ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
| ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet |
@ -1810,9 +1809,9 @@ then select "OAuth client ID".
9. It will show you a client ID and client secret. Make a note of these.
(If you selected "External" at Step 5 continue to Step 10.
(If you selected "External" at Step 5 continue to Step 9.
If you chose "Internal" you don't need to publish and can skip straight to
Step 11 but your destination drive must be part of the same Google Workspace.)
Step 10 but your destination drive must be part of the same Google Workspace.)
10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
You will also want to add yourself as a test user.

View file

@ -4,7 +4,7 @@ description: "Rclone docs for Files.com"
versionIntroduced: "v1.68"
---
# {{< icon "fa fa-brands fa-files-pinwheel" >}} Files.com
# {{< icon "fa fa-file-alt" >}} Files.com
[Files.com](https://www.files.com/) is a cloud storage service that provides a
secure and easy way to store and share files.

View file

@ -505,8 +505,6 @@ processed in.
Arrange the order of filter rules with the most restrictive first and
work down.
Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
E.g. for `filter-file.txt`:
# a sample filter rule file
@ -514,7 +512,6 @@ E.g. for `filter-file.txt`:
+ *.jpg
+ *.png
+ file2.avi
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
- /dir/Trash/**
+ /dir/**
# exclude everything else

View file

@ -201,55 +201,6 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.
### Service Account Authentication with Access Tokens
Another option for service account authentication is to use access tokens via *gcloud impersonate-service-account*. Access tokens improve security by avoiding the use of the JSON
key file, which can be leaked. They also bypass the OAuth login flow, which is simpler
on remote VMs that lack a web browser.
If you already have a working service account, skip to step 3.
#### 1. Create a service account using
gcloud iam service-accounts create gcs-read-only
You can re-use an existing service account as well (like the one created above)
#### 2. Attach a Viewer (read-only) or User (read-write) role to the service account
$ PROJECT_ID=my-project
$ gcloud --verbose iam service-accounts add-iam-policy-binding \
gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com \
--member=serviceAccount:gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com \
--role=roles/storage.objectViewer
Use the Google Cloud console to identify a limited role. Some relevant pre-defined roles:
* *roles/storage.objectUser* -- read-write access but no admin privileges
* *roles/storage.objectViewer* -- read-only access to objects
* *roles/storage.admin* -- create buckets & administrative roles
#### 3. Get a temporary access key for the service account
$ gcloud auth application-default print-access-token \
--impersonate-service-account \
gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com
ya29.c.c0ASRK0GbAFEewXD [truncated]
#### 4. Update `access_token` setting
Hit `CTRL-C` when you see *waiting for code*. This will save the config without doing the OAuth flow
rclone config update ${REMOTE_NAME} access_token ya29.c.c0Axxxx
#### 5. Run rclone as usual
rclone ls dev-gcs:${MY_BUCKET}/
### More Info on Service Accounts
* [Official GCS Docs](https://cloud.google.com/compute/docs/access/service-accounts)
* [Guide on Service Accounts using Key Files (less secure, but similar concepts)](https://forum.rclone.org/t/access-using-google-service-account/24822/2)
### Anonymous Access
For downloads of objects that permit public access you can configure rclone

View file

@ -1,159 +0,0 @@
---
title: "iCloud Drive"
description: "Rclone docs for iCloud Drive"
versionIntroduced: "v1.69"
---
# {{< icon "fa fa-cloud" >}} iCloud Drive
## Configuration
The initial setup for an iCloud Drive backend involves getting a trust token/session. This can be done by simply using the regular iCloud password, and accepting the code prompt on another iCloud connected device.
`IMPORTANT: At the moment an app specific password won't be accepted. Only use your regular password and 2FA.`
`rclone config` walks you through the token creation. The trust token is valid for 30 days, after which you will have to reauthenticate with `rclone reconnect` or `rclone config`.
Here is an example of how to make a remote called `iclouddrive`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> iclouddrive
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / iCloud Drive
\ (iclouddrive)
[snip]
Storage> iclouddrive
Option apple_id.
Apple ID.
Enter a value.
apple_id> APPLEID
Option password.
Password.
Choose an alternative below.
y) Yes, type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Option config_2fa.
Two-factor authentication: please enter your 2FA code
Enter a value.
config_2fa> 2FACODE
Remote config
--------------------
[iclouddrive]
- type: iclouddrive
- apple_id: APPLEID
- password: *** ENCRYPTED ***
- cookies: ****************************
- trust_token: ****************************
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
## Advanced Data Protection
ADP is currently unsupported and needs to be disabled
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to iclouddrive (iCloud Drive).
#### --iclouddrive-apple-id
Apple ID.
Properties:
- Config: apple_id
- Env Var: RCLONE_ICLOUDDRIVE_APPLE_ID
- Type: string
- Required: true
#### --iclouddrive-password
Password.
**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
Properties:
- Config: password
- Env Var: RCLONE_ICLOUDDRIVE_PASSWORD
- Type: string
- Required: true
#### --iclouddrive-trust-token
trust token (internal use)
Properties:
- Config: trust_token
- Env Var: RCLONE_ICLOUDDRIVE_TRUST_TOKEN
- Type: string
- Required: false
#### --iclouddrive-cookies
cookies (internal use only)
Properties:
- Config: cookies
- Env Var: RCLONE_ICLOUDDRIVE_COOKIES
- Type: string
- Required: false
### Advanced options
Here are the Advanced options specific to iclouddrive (iCloud Drive).
#### --iclouddrive-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_ICLOUDDRIVE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
#### --iclouddrive-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_ICLOUDDRIVE_DESCRIPTION
- Type: string
- Required: false
{{< rem autogenerated options stop >}}

View file

@ -209,13 +209,13 @@ $ rclone -L ls /tmp/a
6 b/one
```
#### --local-links, --links, -l
#### --links, -l
Normally rclone will ignore symlinks or junction points (which behave
like symlinks under Windows).
If you supply this flag then rclone will copy symbolic links from the local storage,
and store them as text files, with a `.rclonelink` suffix in the remote storage.
and store them as text files, with a '.rclonelink' suffix in the remote storage.
The text file will contain the target of the symbolic link (see example).
@ -236,7 +236,7 @@ Copying the entire directory with '-l'
$ rclone copy -l /tmp/a/ remote:/tmp/a/
```
The remote files are created with a `.rclonelink` suffix
The remote files are created with a '.rclonelink' suffix
```
$ rclone ls remote:/tmp/a
@ -274,7 +274,7 @@ $ tree /tmp/b
/tmp/b
├── file1.rclonelink
└── file2.rclonelink
```
````
If you want to copy a single file with `-l` then you must use the `.rclonelink` suffix.
@ -286,10 +286,6 @@ $ tree /tmp/c
└── file1 -> ./file4
```
Note that `--local-links` just enables this feature for the local
backend. `--links` and `-l` enable the feature for all supported
backends and the VFS.
Note that this flag is incompatible with `--copy-links` / `-L`.
### Restricting filesystems with --one-file-system
@ -365,9 +361,9 @@ Properties:
- Type: bool
- Default: false
#### --local-links
#### --links / -l
Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend.
Translate symlinks to/from regular files with a '.rclonelink' extension.
Properties:

View file

@ -161,27 +161,6 @@ You may try to [verify you account](https://docs.microsoft.com/en-us/azure/activ
Note: If you have a special region, you may need a different host in step 4 and 5. Here are [some hints](https://github.com/rclone/rclone/blob/bc23bf11db1c78c6ebbf8ea538fbebf7058b4176/backend/onedrive/onedrive.go#L86).
### Using OAuth Client Credential flow
OAuth Client Credential flow will allow rclone to use permissions
directly associated with the Azure AD Enterprise application, rather
than adopting the context of an Azure AD user account.
This flow can be enabled by following the steps below:
1. Create the Enterprise App registration in the Azure AD portal and obtain a Client ID and Client Secret as described above.
2. Ensure that the application has the appropriate permissions and they are assigned as *Application Permissions*
3. Configure the remote, ensuring that *Client ID* and *Client Secret* are entered correctly.
4. In the *Advanced Config* section, enter `true` for `client_credentials` and in the `tenant` section enter the tenant ID.
When choosing the type of connection to use with the client credentials
flow, note that the "onedrive" option does not work. You can use the
"sharepoint" option, or if that does not find the correct drive ID, type
it in manually with the "driveid" option.
**NOTE** Assigning permissions directly to the application means that
anyone with the *Client ID* and *Client Secret* can access your
OneDrive files. Take care to safeguard these credentials.
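
For illustration only, a generic client credentials grant in Go using `golang.org/x/oauth2/clientcredentials`; the Microsoft token URL and the Graph `.default` scope below are assumptions based on standard Azure AD usage, not taken from rclone:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	conf := &clientcredentials.Config{
		ClientID:     "CLIENT_ID",     // from the app registration
		ClientSecret: "CLIENT_SECRET", // from the app registration
		// Assumed Azure AD v2 token endpoint for the tenant.
		TokenURL: "https://login.microsoftonline.com/TENANT_ID/oauth2/v2.0/token",
		Scopes:   []string{"https://graph.microsoft.com/.default"},
	}
	tok, err := conf.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("got token, type:", tok.TokenType)
}
```
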
### Modification times and hashes

View file

@ -33,7 +33,6 @@ Here is an overview of the major features of each cloud storage system.
| HDFS | - | R/W | No | No | - | - |
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
| HTTP | - | R | No | No | R | - |
| iCloud Drive | - | R | No | No | - | - |
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
| Jottacloud | MD5 | R/W | Yes | No | R | RW |
| Koofr | MD5 | - | Yes | No | - | - |
@ -47,7 +46,7 @@ Here is an overview of the major features of each cloud storage system.
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
| premiumize.me | - | - | Yes | No | R | - |
@ -506,13 +505,12 @@ upon backend-specific capabilities.
| Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes |
| Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Google Cloud Storage | Yes | Yes | No | No | No | No | Yes | No | No | No | No |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No | No | No | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| Google Photos | No | No | No | No | No | No | No | No | No | No | No |
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
| iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes |
| Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |

View file

@ -39,10 +39,6 @@ Pcloud App Client Id - leave blank normally.
client_id>
Pcloud App Client Secret - leave blank normally.
client_secret>
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Remote config
Use web browser to automatically authenticate rclone with remote?
* Say Y if the machine running rclone has a web browser you can use
@ -71,10 +67,6 @@ y/e/d> y
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note if you are using remote config with rclone authorize and your pcloud
account is in the EU region, you will need to set the hostname in 'Edit advanced
config', otherwise you might get a token error.
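For example, an EU remote might carry the hostname setting like this (a sketch; `eapi.pcloud.com` is the EU API endpoint):
```
[pcloud]
type = pcloud
hostname = eapi.pcloud.com
```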
Note that rclone runs a webserver on your local machine to collect the
token as returned from pCloud. This only runs from the moment it opens
your browser to the moment you get back the verification code. This

View file

@ -18,31 +18,29 @@ If you just want to run a remote control then see the [rcd](/commands/rclone_rcd
### --rc
Flag to start the http server to listen for remote requests.
Flag to start the http server to listen for remote requests
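For example, to enable the remote control API alongside a mount (a minimal sketch; the remote and mountpoint are hypothetical):
```
rclone mount --rc remote: /mnt/remote
```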
### --rc-addr=IP
IPaddress:Port or :Port to bind server to. (default "localhost:5572").
IPaddress:Port or :Port to bind server to. (default "localhost:5572")
### --rc-cert=KEY
SSL PEM key (concatenation of certificate and CA certificate).
SSL PEM key (concatenation of certificate and CA certificate)
### --rc-client-ca=PATH
Client certificate authority to verify clients with.
Client certificate authority to verify clients with
### --rc-htpasswd=PATH
htpasswd file - if not provided no authentication is done.
htpasswd file - if not provided no authentication is done
### --rc-key=PATH
TLS PEM private key file.
SSL PEM Private key
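For example, to serve the API over TLS (a sketch; the certificate, key and credentials are placeholders):
```
rclone rcd --rc-addr=:5572 --rc-cert=server.pem --rc-key=server.key --rc-user=admin --rc-pass=secret
```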
### --rc-max-header-bytes=VALUE
Maximum size of request header (default 4096).
Maximum size of request header (default 4096)
### --rc-min-tls-version=VALUE
@ -59,15 +57,15 @@ Password for authentication.
### --rc-realm=VALUE
Realm for authentication (default "rclone").
Realm for authentication (default "rclone")
### --rc-server-read-timeout=DURATION
Timeout for server reading data (default 1h0m0s).
Timeout for server reading data (default 1h0m0s)
### --rc-server-write-timeout=DURATION
Timeout for server writing data (default 1h0m0s).
Timeout for server writing data (default 1h0m0s)
### --rc-serve

View file

@ -27,7 +27,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Linode Object Storage" home="https://www.linode.com/products/object-storage/" config="/s3/#linode" >}}
{{< provider name="Magalu Object Storage" home="https://magalu.cloud/object-storage/" config="/s3/#magalu" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
@ -35,7 +34,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/s3/#storj" >}}
{{< provider name="Synology C2 Object Storage" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
@ -2503,9 +2501,6 @@ Note that Cloudflare decompresses files uploaded with
does. If this is causing a problem then upload the files with
`--header-upload "Cache-Control: no-transform"`
A consequence of this is that `Content-Encoding: gzip` will never
appear in the metadata on Cloudflare.
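For example (a sketch; the remote name and bucket are hypothetical):
```
rclone copy --header-upload "Cache-Control: no-transform" /path/to/files r2:bucket
```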
### Dreamhost
Dreamhost [DreamObjects](https://www.dreamhost.com/cloud/storage/) is
@ -3230,168 +3225,6 @@ So once set up, for example, to copy files into a bucket
rclone copy /path/to/files minio:bucket
```
### Outscale
[OUTSCALE Object Storage (OOS)](https://en.outscale.com/storage/outscale-object-storage/) is an enterprise-grade, S3-compatible storage service provided by OUTSCALE, a brand of Dassault Systèmes. For more information about OOS, see the [official documentation](https://docs.outscale.com/en/userguide/OUTSCALE-Object-Storage-OOS.html).
Here is an example of an OOS configuration that you can paste into your rclone configuration file:
```
[outscale]
type = s3
provider = Outscale
env_auth = false
access_key_id = ABCDEFGHIJ0123456789
secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
region = eu-west-2
endpoint = oos.eu-west-2.outscale.com
acl = private
```
You can also run `rclone config` to go through the interactive setup process:
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```
```
Enter name for new remote.
name> outscale
```
```
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
X / Amazon S3 Compliant Storage Providers including AWS, ...Outscale, ...and others
\ (s3)
[snip]
Storage> outscale
```
```
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / OUTSCALE Object Storage (OOS)
\ (Outscale)
[snip]
provider> Outscale
```
```
Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
1 / Enter AWS credentials in the next step.
\ (false)
2 / Get AWS credentials from the environment (env vars or IAM).
\ (true)
env_auth>
```
```
Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> ABCDEFGHIJ0123456789
```
```
Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
```
```
Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Paris, France
\ (eu-west-2)
2 / New Jersey, USA
\ (us-east-2)
3 / California, USA
\ (us-west-1)
4 / SecNumCloud, Paris, France
\ (cloudgouv-eu-west-1)
5 / Tokyo, Japan
\ (ap-northeast-1)
region> 1
```
```
Option endpoint.
Endpoint for S3 API.
Required when using an S3 clone.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Outscale EU West 2 (Paris)
\ (oos.eu-west-2.outscale.com)
2 / Outscale US east 2 (New Jersey)
\ (oos.us-east-2.outscale.com)
3 / Outscale EU West 1 (California)
\ (oos.us-west-1.outscale.com)
4 / Outscale SecNumCloud (Paris)
\ (oos.cloudgouv-eu-west-1.outscale.com)
5 / Outscale AP Northeast 1 (Japan)
\ (oos.ap-northeast-1.outscale.com)
endpoint> 1
```
```
Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
/ Owner gets FULL_CONTROL.
1 | No one else has access rights (default).
\ (private)
[snip]
acl> 1
```
```
Edit advanced config?
y) Yes
n) No (default)
y/n> n
```
```
Configuration complete.
Options:
- type: s3
- provider: Outscale
- access_key_id: ABCDEFGHIJ0123456789
- secret_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- endpoint: oos.eu-west-2.outscale.com
Keep this "outscale" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
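So once set up, for example, to copy files into a bucket (the bucket name is hypothetical):
```
rclone copy /path/to/files outscale:bucket
```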
### Qiniu Cloud Object Storage (Kodo) {#qiniu}
[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo) is an object storage service built on Qiniu's independently developed core technology, proven through extensive customer use and holding a leading position in the market. Kodo can be widely applied to mass data management.
@ -3658,8 +3491,8 @@ chunk_size = 5M
copy_cutoff = 5M
```
[Scaleway Glacier](https://www.scaleway.com/en/glacier-cold-storage/) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to Scaleway Glacier. Note that in this state you can't read files back; you will need to restore them to the "STANDARD" storage_class first before being able to read them (see the "restore" section above)
[C14 Cold Storage](https://www.online.net/en/storage/c14-cold-storage) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to C14. Note that in this state you can't read files back; you will need to restore them to the "STANDARD" storage_class first before being able to read them (see the "restore" section above)
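For example, a remote set up to upload straight to the GLACIER storage class might look like this (a sketch; the Paris region values are assumptions):
```
[scaleway]
type = s3
provider = Scaleway
region = fr-par
endpoint = s3.fr-par.scw.cloud
storage_class = GLACIER
```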
### Seagate Lyve Cloud {#lyve}
@ -3854,125 +3687,6 @@ So once set up, for example to copy files into a bucket
rclone copy /path/to/files seaweedfs_s3:foo
```
### Selectel
[Selectel Cloud Storage](https://selectel.ru/services/cloud/storage/)
is an S3 compatible storage system which features triple redundancy
storage, automatic scaling, high availability and a comprehensive IAM
system.
Selectel have a section on their website for [configuring
rclone](https://docs.selectel.ru/en/cloud/object-storage/tools/rclone/)
which shows how to create the right API keys.
From rclone v1.69 Selectel is a supported provider - please choose the
`Selectel` provider type.
Note that you should use "vHosted" access for the buckets (which is
the recommended default), not "path style".
You can use `rclone config` to make a new provider like this
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
Enter name for new remote.
name> selectel
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including ..., Selectel, ...
\ (s3)
[snip]
Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / Selectel Object Storage
\ (Selectel)
[snip]
provider> Selectel
Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
1 / Enter AWS credentials in the next step.
\ (false)
2 / Get AWS credentials from the environment (env vars or IAM).
\ (true)
env_auth> 1
Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> ACCESS_KEY
Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> SECRET_ACCESS_KEY
Option region.
Region where your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / St. Petersburg
\ (ru-1)
region> 1
Option endpoint.
Endpoint for Selectel Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Saint Petersburg
\ (s3.ru-1.storage.selcloud.ru)
endpoint> 1
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: s3
- provider: Selectel
- access_key_id: ACCESS_KEY
- secret_access_key: SECRET_ACCESS_KEY
- region: ru-1
- endpoint: s3.ru-1.storage.selcloud.ru
Keep this "selectel" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
And your config should end up looking like this:
```
[selectel]
type = s3
provider = Selectel
access_key_id = ACCESS_KEY
secret_access_key = SECRET_ACCESS_KEY
region = ru-1
endpoint = s3.ru-1.storage.selcloud.ru
```
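Once set up, you can use the remote as usual, for example (the bucket name is hypothetical):
```
rclone mkdir selectel:bucket
rclone copy /path/to/files selectel:bucket
```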
### Wasabi
[Wasabi](https://wasabi.com) is a cloud-based object storage service for a

View file

@ -156,7 +156,7 @@ and the public key built into it will be used during the authentication process.
If you have a certificate you may use it to sign your public key, creating a
separate SSH user certificate that should be used instead of the plain public key
extracted from the private key. Then you must provide the path to the
user certificate public key file in `pubkey_file` or the content of the file in `pubkey`.
user certificate public key file in `pubkey_file`.
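For example, a remote using a signed user certificate might be configured like this (a sketch; the host, user and paths are hypothetical):
```
[sftp]
type = sftp
host = example.com
user = youruser
key_file = ~/.ssh/id_rsa
pubkey_file = ~/.ssh/id_rsa-cert.pub
```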
Note: This is not the traditional public key paired with your private key,
typically saved as `/home/$USER/.ssh/id_rsa.pub`. Setting this path in
@ -494,19 +494,6 @@ Properties:
- Type: string
- Required: false
#### --sftp-pubkey
SSH public certificate for public certificate based authentication.
Set this if you have a signed certificate you want to use for authentication.
If specified will override pubkey_file.
Properties:
- Config: pubkey
- Env Var: RCLONE_SFTP_PUBKEY
- Type: string
- Required: false
#### --sftp-pubkey-file
Optional path to public key file.

View file

@ -11,9 +11,7 @@
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title>
<link rel="canonical" href="{{ .Permalink }}">
<link href="/css/bootstrap.min.4.4.1.css" rel="stylesheet">
<link href="/fontawesome/css/fontawesome.min.css" rel="stylesheet">
<link href="/fontawesome/css/brands.min.css" rel="stylesheet">
<link href="/fontawesome/css/solid.min.css" rel="stylesheet">
<link href="/css/font-awesome.min.5.10.2.css" rel="stylesheet">
<link href="/css/custom.css?r={{ .Date.Unix }}" rel="stylesheet">
{{ $RSSLink := "" }}{{ with .OutputFormats.Get "RSS" }}{{ $RSSLink = .RelPermalink }}{{ end }}{{ if $RSSLink }}<link href="{{ $RSSLink }}" rel="alternate" type="application/rss+xml" title="{{ .Site.Title }}" />{{ end }}
</head>

View file

@ -65,7 +65,7 @@
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/filescom/"><i class="fa fa-file-alt fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
<a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google fa-fw"></i> Google Cloud Storage</a>
@ -75,7 +75,6 @@
<a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe fa-fw"></i> HDFS (Hadoop Distributed Filesystem)</a>
<a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud fa-fw"></i> HiDrive</a>
<a class="dropdown-item" href="/http/"><i class="fa fa-globe fa-fw"></i> HTTP</a>
<a class="dropdown-item" href="/iclouddrive/"><i class="fa fa-archive fa-fw"></i> iCloud Drive</a>
<a class="dropdown-item" href="/imagekit/"><i class="fa fa-cloud fa-fw"></i> ImageKit</a>
<a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive fa-fw"></i> Internet Archive</a>
<a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud fa-fw"></i> Jottacloud</a>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,6 +0,0 @@
/*!
* Font Awesome Free 6.7.1 by @fontawesome - https://fontawesome.com
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
* Copyright 2024 Fonticons, Inc.
*/
:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-weight:900}

Binary file not shown.

BIN
docs/static/webfonts/fa-brands-400.eot vendored Normal file

Binary file not shown.

Some files were not shown because too many files have changed in this diff.