Merge branch 'rclone:master' into user-from-header
This commit is contained in:
commit
4e9c0a076a
60 changed files with 1569 additions and 518 deletions
|
@ -46,7 +46,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -65,12 +64,10 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://app.box.com/api/oauth2/authorize",
|
||||
TokenURL: "https://app.box.com/api/oauth2/token",
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://app.box.com/api/oauth2/authorize",
|
||||
TokenURL: "https://app.box.com/api/oauth2/token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
|
|
@ -80,9 +80,10 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
driveConfig = &oauth2.Config{
|
||||
driveConfig = &oauthutil.Config{
|
||||
Scopes: []string{scopePrefix + "drive"},
|
||||
Endpoint: google.Endpoint,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
|
|
@ -94,7 +94,7 @@ const (
|
|||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
dropboxConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"files.metadata.write",
|
||||
"files.content.write",
|
||||
|
@ -109,7 +109,8 @@ var (
|
|||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
|
||||
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -134,7 +135,7 @@ var (
|
|||
)
|
||||
|
||||
// Gets an oauth config with the right scopes
|
||||
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
|
||||
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
|
||||
// If not impersonating, use standard scopes
|
||||
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
|
||||
return dropboxConfig
|
||||
|
|
|
@ -60,14 +60,17 @@ const (
|
|||
minSleep = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
// Description of how to auth for this app
|
||||
var storageConfig = &oauth2.Config{
|
||||
Scopes: []string{storage.DevstorageReadWriteScope},
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
storageConfig = &oauthutil.Config{
|
||||
Scopes: []string{storage.DevstorageReadWriteScope},
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
|
|
|
@ -33,7 +33,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
|
@ -60,13 +59,14 @@ const (
|
|||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"openid",
|
||||
"profile",
|
||||
scopeReadWrite, // this must be at position scopeAccess
|
||||
},
|
||||
Endpoint: google.Endpoint,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
|
|
@ -31,7 +31,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -48,11 +47,9 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app.
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://my.hidrive.com/client/authorize",
|
||||
TokenURL: "https://my.hidrive.com/oauth2/token",
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
AuthURL: "https://my.hidrive.com/client/authorize",
|
||||
TokenURL: "https://my.hidrive.com/oauth2/token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
|
|
|
@ -277,11 +277,9 @@ machines.`)
|
|||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -292,11 +290,9 @@ machines.`)
|
|||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
ClientID: telianoCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -307,11 +303,9 @@ machines.`)
|
|||
m.Set(configClientID, tele2CloudClientID)
|
||||
m.Set(configTokenURL, tele2CloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
ClientID: tele2CloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -322,11 +316,9 @@ machines.`)
|
|||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
},
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -924,19 +916,17 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
|||
}
|
||||
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
oauthConfig := &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: defaultTokenURL,
|
||||
TokenURL: defaultTokenURL,
|
||||
},
|
||||
oauthConfig := &oauthutil.Config{
|
||||
AuthURL: defaultTokenURL,
|
||||
TokenURL: defaultTokenURL,
|
||||
}
|
||||
if ver == configVersion {
|
||||
oauthConfig.ClientID = defaultClientID
|
||||
// if custom endpoints are set use them else stick with defaults
|
||||
if tokenURL, ok := m.Get(configTokenURL); ok {
|
||||
oauthConfig.Endpoint.TokenURL = tokenURL
|
||||
oauthConfig.TokenURL = tokenURL
|
||||
// jottacloud is weird. we need to use the tokenURL as authURL
|
||||
oauthConfig.Endpoint.AuthURL = tokenURL
|
||||
oauthConfig.AuthURL = tokenURL
|
||||
}
|
||||
} else if ver == legacyConfigVersion {
|
||||
clientID, ok := m.Get(configClientID)
|
||||
|
@ -950,8 +940,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
|||
oauthConfig.ClientID = clientID
|
||||
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
|
||||
|
||||
oauthConfig.Endpoint.TokenURL = legacyTokenURL
|
||||
oauthConfig.Endpoint.AuthURL = legacyTokenURL
|
||||
oauthConfig.TokenURL = legacyTokenURL
|
||||
oauthConfig.AuthURL = legacyTokenURL
|
||||
|
||||
// add the request filter to fix token refresh
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
|
|
|
@ -34,7 +34,6 @@ import (
|
|||
// Constants
|
||||
const (
|
||||
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
)
|
||||
|
||||
|
@ -101,10 +100,8 @@ Metadata is supported on files and directories.
|
|||
},
|
||||
{
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
|
@ -379,17 +376,22 @@ type Directory struct {
|
|||
|
||||
var (
|
||||
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
)
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Override --local-links with --links if set
|
||||
if ci.Links {
|
||||
opt.TranslateSymlinks = true
|
||||
}
|
||||
if opt.TranslateSymlinks && opt.FollowSymlinks {
|
||||
return nil, errLinksAndCopyLinks
|
||||
}
|
||||
|
@ -435,9 +437,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
// Check to see if this is a .rclonelink if not found
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
|
||||
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
|
||||
}
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
|
@ -508,8 +510,8 @@ func (f *Fs) caseInsensitive() bool {
|
|||
//
|
||||
// for regular files, localPath is returned unchanged
|
||||
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
|
||||
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
|
||||
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
|
||||
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
|
||||
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
|
||||
return newLocalPath, isTranslatedLink
|
||||
}
|
||||
|
||||
|
@ -692,7 +694,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||
} else {
|
||||
// Check whether this link should be translated
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
newRemote += fs.LinkSuffix
|
||||
}
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
|
|
|
@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
|
|||
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
|
||||
|
||||
// Object viewed as symlink
|
||||
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
|
||||
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
|
||||
|
||||
// Object viewed as destination
|
||||
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
|
||||
|
@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
|
|||
|
||||
// Create a symlink
|
||||
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
|
||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
|
||||
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
|
||||
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
|
||||
if haveLChtimes {
|
||||
r.CheckLocalItems(t, file1, file2, file3)
|
||||
|
@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
|
|||
assert.Equal(t, "file.txt", linkText)
|
||||
|
||||
// Check that NewObject gets the correct object
|
||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
|
||||
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
|
||||
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
|
||||
assert.Equal(t, int64(8), o.Size())
|
||||
|
||||
// Check that NewObject doesn't see the non suffixed version
|
||||
|
@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
|
|||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check that NewFs works with the suffixed version and --links
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
@ -277,7 +277,7 @@ func TestMetadata(t *testing.T) {
|
|||
// Write a symlink to the file
|
||||
symlinkPath := "metafile-link.txt"
|
||||
osSymlinkPath := filepath.Join(f.root, symlinkPath)
|
||||
symlinkPath += linkSuffix
|
||||
symlinkPath += fs.LinkSuffix
|
||||
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
|
||||
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
|
||||
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
|
||||
|
|
|
@ -68,14 +68,12 @@ var (
|
|||
)
|
||||
|
||||
// Description of how to authorize
|
||||
var oauthConfig = &oauth2.Config{
|
||||
var oauthConfig = &oauthutil.Config{
|
||||
ClientID: api.OAuthClientID,
|
||||
ClientSecret: "",
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: api.OAuthURL,
|
||||
TokenURL: api.OAuthURL,
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
AuthURL: api.OAuthURL,
|
||||
TokenURL: api.OAuthURL,
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
|
@ -438,7 +436,9 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
|
|||
if err != nil || !tokenIsValid(t) {
|
||||
fs.Infof(f, "Valid token not found, authorizing.")
|
||||
ctx := oauthutil.Context(ctx, f.cli)
|
||||
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
|
||||
|
||||
oauth2Conf := oauthConfig.MakeOauth2Config()
|
||||
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
|
||||
}
|
||||
if err == nil && !tokenIsValid(t) {
|
||||
err = errors.New("invalid token")
|
||||
|
|
|
@ -40,7 +40,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -65,14 +64,21 @@ const (
|
|||
|
||||
// Globals
|
||||
var (
|
||||
authPath = "/common/oauth2/v2.0/authorize"
|
||||
tokenPath = "/common/oauth2/v2.0/token"
|
||||
|
||||
// Define the paths used for token operations
|
||||
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
|
||||
authPath = "/oauth2/v2.0/authorize"
|
||||
tokenPath = "/oauth2/v2.0/token"
|
||||
|
||||
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
|
||||
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
// When using client credential OAuth flow, scope of .default is required in order
|
||||
// to use the permissions configured for the application within the tenant
|
||||
scopeAccessClientCred = fs.SpaceSepList{".default"}
|
||||
|
||||
// Base config for how to auth
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: scopeAccess,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
|
@ -183,6 +189,14 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
|
|||
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||
|
||||
Set this if using
|
||||
- Client Credential flow
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
|
@ -527,28 +541,54 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
|
|||
})
|
||||
}
|
||||
|
||||
// Config the backend
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
region, graphURL := getRegionURL(m)
|
||||
// Make the oauth config for the backend
|
||||
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
|
||||
// Copy the default oauthConfig
|
||||
oauthConfig := *oauthConfig
|
||||
|
||||
if config.State == "" {
|
||||
var accessScopes fs.SpaceSepList
|
||||
accessScopesString, _ := m.Get("access_scopes")
|
||||
err := accessScopes.Set(accessScopesString)
|
||||
// Set the scopes
|
||||
oauthConfig.Scopes = opt.AccessScopes
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
|
||||
// Construct the auth URLs
|
||||
prefix := commonPathPrefix
|
||||
if opt.Tenant != "" {
|
||||
prefix = "/" + opt.Tenant
|
||||
}
|
||||
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
|
||||
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
|
||||
|
||||
// Check to see if we are using client credentials flow
|
||||
if opt.ClientCredentials {
|
||||
// Override scope to .default
|
||||
oauthConfig.Scopes = scopeAccessClientCred
|
||||
if opt.Tenant == "" {
|
||||
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
|
||||
}
|
||||
}
|
||||
|
||||
return &oauthConfig, nil
|
||||
}
|
||||
|
||||
// Config the backend
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, graphURL := getRegionURL(m)
|
||||
|
||||
// Check to see if this is the start of the state machine execution
|
||||
if conf.State == "" {
|
||||
conf, err := makeOauthConfig(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
|
||||
}
|
||||
oauthConfig.Scopes = []string(accessScopes)
|
||||
disableSitePermission, _ := m.Get("disable_site_permission")
|
||||
if disableSitePermission == "true" {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[region] + authPath,
|
||||
TokenURL: authEndpoint[region] + tokenPath,
|
||||
return nil, err
|
||||
}
|
||||
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
OAuth2Config: conf,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -556,9 +596,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
|
||||
}
|
||||
|
||||
// Create a REST client, build on the OAuth client created above
|
||||
srv := rest.NewClient(oAuthClient)
|
||||
|
||||
switch config.State {
|
||||
switch conf.State {
|
||||
case "choose_type":
|
||||
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
|
||||
Value: "onedrive",
|
||||
|
@ -584,7 +626,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
|||
}})
|
||||
case "choose_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
return fs.ConfigGoto(config.Result)
|
||||
return fs.ConfigGoto(conf.Result)
|
||||
case "onedrive":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
opts: rest.Opts{
|
||||
|
@ -602,16 +644,22 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
|||
},
|
||||
})
|
||||
case "driveid":
|
||||
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
|
||||
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
// Default the drive_id to the previous version in the config
|
||||
out.Option.Default, _ = m.Get("drive_id")
|
||||
return out, nil
|
||||
case "driveid_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
finalDriveID: config.Result,
|
||||
finalDriveID: conf.Result,
|
||||
})
|
||||
case "siteid":
|
||||
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
|
||||
case "siteid_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
siteID: config.Result,
|
||||
siteID: conf.Result,
|
||||
})
|
||||
case "url":
|
||||
return fs.ConfigInput("url_end", "config_site_url", `Site URL
|
||||
|
@ -622,7 +670,7 @@ Examples:
|
|||
- "https://XXX.sharepoint.com/teams/ID"
|
||||
`)
|
||||
case "url_end":
|
||||
siteURL := config.Result
|
||||
siteURL := conf.Result
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
|
||||
match := re.FindStringSubmatch(siteURL)
|
||||
if len(match) == 2 {
|
||||
|
@ -637,12 +685,12 @@ Examples:
|
|||
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
|
||||
case "path_end":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
relativePath: config.Result,
|
||||
relativePath: conf.Result,
|
||||
})
|
||||
case "search":
|
||||
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
|
||||
case "search_end":
|
||||
searchTerm := config.Result
|
||||
searchTerm := conf.Result
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
|
@ -664,10 +712,10 @@ Examples:
|
|||
})
|
||||
case "search_sites":
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
siteID: config.Result,
|
||||
siteID: conf.Result,
|
||||
})
|
||||
case "driveid_final":
|
||||
finalDriveID := config.Result
|
||||
finalDriveID := conf.Result
|
||||
|
||||
// Test the driveID and get drive type
|
||||
opts := rest.Opts{
|
||||
|
@ -686,12 +734,12 @@ Examples:
|
|||
|
||||
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
|
||||
case "driveid_final_end":
|
||||
if config.Result == "true" {
|
||||
if conf.Result == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
return fs.ConfigGoto("choose_type")
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
return nil, fmt.Errorf("unknown state %q", conf.State)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
|
@ -702,7 +750,9 @@ type Options struct {
|
|||
DriveType string `config:"drive_type"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
DisableSitePermission bool `config:"disable_site_permission"`
|
||||
ClientCredentials bool `config:"client_credentials"`
|
||||
AccessScopes fs.SpaceSepList `config:"access_scopes"`
|
||||
Tenant string `config:"tenant"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
|
@ -990,13 +1040,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
}
|
||||
|
||||
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||
oauthConfig.Scopes = opt.AccessScopes
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
|
||||
oauthConfig, err := makeOauthConfig(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
@ -2563,8 +2610,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||
return errors.New("can't upload content to a OneNote file")
|
||||
}
|
||||
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
// Only start the renewer if we have a valid one
|
||||
if o.fs.tokenRenewer != nil {
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
}
|
||||
|
||||
size := src.Size()
|
||||
|
||||
|
|
|
@ -106,9 +106,9 @@ func newOptions() []fs.Option {
|
|||
Sensitive: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Required: false,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
|
|
|
@ -48,12 +48,10 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://my.pcloud.com/oauth2/authorize",
|
||||
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -61,8 +59,8 @@ var (
|
|||
)
|
||||
|
||||
// Update the TokenURL with the actual hostname
|
||||
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
|
||||
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
|
||||
func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
|
||||
oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
|
@ -79,7 +77,7 @@ func init() {
|
|||
fs.Errorf(nil, "Failed to read config: %v", err)
|
||||
}
|
||||
updateTokenURL(oauthConfig, optc.Hostname)
|
||||
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
|
||||
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
|
||||
if auth == nil || auth.Form == nil {
|
||||
return errors.New("form not found in response")
|
||||
}
|
||||
|
@ -399,14 +397,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
|
||||
return nil, fmt.Errorf("close file: %w", err)
|
||||
}
|
||||
|
||||
writer := &writerAt{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
fs: f,
|
||||
size: size,
|
||||
remote: remote,
|
||||
fd: openResult.FileDescriptor,
|
||||
fileID: openResult.Fileid,
|
||||
}
|
||||
|
||||
|
|
|
@ -18,21 +18,14 @@ import (
|
|||
// writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
|
||||
type writerAt struct {
|
||||
ctx context.Context
|
||||
client *rest.Client
|
||||
fs *Fs
|
||||
size int64
|
||||
remote string
|
||||
fd int64
|
||||
fileID int64
|
||||
}
|
||||
|
||||
// Close implements WriterAt.Close.
|
||||
func (c *writerAt) Close() error {
|
||||
// close fd
|
||||
if _, err := c.fileClose(c.ctx); err != nil {
|
||||
return fmt.Errorf("close fd: %w", err)
|
||||
}
|
||||
|
||||
// Avoiding race conditions: Depending on the tcp connection, there might be
|
||||
// caching issues when checking the size immediately after write.
|
||||
// Hence we try avoiding them by checking the resulting size on a different connection.
|
||||
|
@ -72,8 +65,18 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
|
|||
inSHA1Bytes := sha1.Sum(buffer)
|
||||
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
|
||||
|
||||
client, err := c.fs.newSingleConnClient(c.ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("create client: %w", err)
|
||||
}
|
||||
|
||||
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
|
||||
// get target hash
|
||||
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
|
||||
outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -89,10 +92,15 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
|
|||
}
|
||||
|
||||
// upload buffer with offset if necessary
|
||||
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
|
||||
if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// close fd
|
||||
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
|
||||
return contentLength, fmt.Errorf("close fd: %w", err)
|
||||
}
|
||||
|
||||
return contentLength, nil
|
||||
}
|
||||
|
||||
|
@ -125,11 +133,40 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
|
||||
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_open",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
|
||||
opts.Parameters.Set("flags", "0x0002") // O_WRITE
|
||||
|
||||
result := &api.FileOpenResponse{}
|
||||
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open new file descriptor: %w", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_checksum, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
|
||||
func (c *writerAt) fileChecksum(
|
||||
func fileChecksum(
|
||||
ctx context.Context,
|
||||
offset, count int64,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd, offset, count int64,
|
||||
) (*api.FileChecksumResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
|
@ -140,26 +177,29 @@ func (c *writerAt) fileChecksum(
|
|||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
|
||||
|
||||
result := &api.FileChecksumResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
|
||||
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_pwrite, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
|
||||
func (c *writerAt) filePWrite(
|
||||
func filePWrite(
|
||||
ctx context.Context,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd int64,
|
||||
offset int64,
|
||||
buf []byte,
|
||||
) (*api.FilePWriteResponse, error) {
|
||||
|
@ -176,24 +216,29 @@ func (c *writerAt) filePWrite(
|
|||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
|
||||
result := &api.FilePWriteResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
|
||||
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_close, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
|
||||
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
|
||||
func fileClose(
|
||||
ctx context.Context,
|
||||
client *rest.Client,
|
||||
pacer *fs.Pacer,
|
||||
fd int64,
|
||||
) (*api.FileCloseResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_close",
|
||||
|
@ -201,11 +246,11 @@ func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error
|
|||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
Close: true,
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
|
||||
|
||||
result := &api.FileCloseResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err := pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
|
|
@ -82,13 +82,11 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
TokenURL: "https://user.mypikpak.com/v1/auth/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
TokenURL: "https://user.mypikpak.com/v1/auth/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
ClientID: clientID,
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
|
@ -215,6 +213,11 @@ Fill in for rclone to use a non root folder as its starting point.
|
|||
Default: false,
|
||||
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_media_link",
|
||||
Default: false,
|
||||
Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hash_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
|
||||
|
@ -288,6 +291,7 @@ type Options struct {
|
|||
RootFolderID string `config:"root_folder_id"`
|
||||
UseTrash bool `config:"use_trash"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
NoMediaLink bool `config:"no_media_link"`
|
||||
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
|
@ -1577,15 +1581,14 @@ func (o *Object) setMetaData(info *api.File) (err error) {
|
|||
o.md5sum = info.Md5Checksum
|
||||
if info.Links.ApplicationOctetStream != nil {
|
||||
o.link = info.Links.ApplicationOctetStream
|
||||
if fid := parseFileID(o.link.URL); fid != "" {
|
||||
for mid, media := range info.Medias {
|
||||
if media.Link == nil {
|
||||
continue
|
||||
}
|
||||
if mfid := parseFileID(media.Link.URL); fid == mfid {
|
||||
fs.Debugf(o, "Using a media link from Medias[%d]", mid)
|
||||
o.link = media.Link
|
||||
break
|
||||
if !o.fs.opt.NoMediaLink {
|
||||
if fid := parseFileID(o.link.URL); fid != "" {
|
||||
for _, media := range info.Medias {
|
||||
if media.Link != nil && parseFileID(media.Link.URL) == fid {
|
||||
fs.Debugf(o, "Using a media link")
|
||||
o.link = media.Link
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,7 +43,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -59,12 +58,10 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://www.premiumize.me/authorize",
|
||||
TokenURL: "https://www.premiumize.me/token",
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://www.premiumize.me/authorize",
|
||||
TokenURL: "https://www.premiumize.me/token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
|
|
@ -13,7 +13,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -41,12 +40,10 @@ const (
|
|||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
putioConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
|
||||
TokenURL: "https://api.put.io/v2/oauth2/access_token",
|
||||
},
|
||||
putioConfig = &oauthutil.Config{
|
||||
Scopes: []string{},
|
||||
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
|
||||
TokenURL: "https://api.put.io/v2/oauth2/access_token",
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
|
|
@ -97,7 +97,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -115,13 +114,11 @@ const (
|
|||
)
|
||||
|
||||
// Generate a new oauth2 config which we will update when we know the TokenURL
|
||||
func newOauthConfig(tokenURL string) *oauth2.Config {
|
||||
return &oauth2.Config{
|
||||
Scopes: nil,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://secure.sharefile.com/oauth/authorize",
|
||||
TokenURL: tokenURL,
|
||||
},
|
||||
func newOauthConfig(tokenURL string) *oauthutil.Config {
|
||||
return &oauthutil.Config{
|
||||
Scopes: nil,
|
||||
AuthURL: "https://secure.sharefile.com/oauth/authorize",
|
||||
TokenURL: tokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectPublicSecureURL,
|
||||
|
@ -136,7 +133,7 @@ func init() {
|
|||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
oauthConfig := newOauthConfig("")
|
||||
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
|
||||
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
|
||||
if auth == nil || auth.Form == nil {
|
||||
return errors.New("endpoint not found in response")
|
||||
}
|
||||
|
@ -147,7 +144,7 @@ func init() {
|
|||
}
|
||||
endpoint := "https://" + subdomain + "." + apicp
|
||||
m.Set("endpoint", endpoint)
|
||||
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
|
||||
oauthConfig.TokenURL = endpoint + tokenPath
|
||||
return nil
|
||||
}
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// oAuth
|
||||
|
@ -47,11 +46,9 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
|
||||
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
|
||||
},
|
||||
oauthConfig = &oauthutil.Config{
|
||||
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
|
||||
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
|
|
|
@ -47,7 +47,7 @@ const (
|
|||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"aaaserver.profile.read",
|
||||
"WorkDrive.team.READ",
|
||||
|
@ -55,11 +55,10 @@ var (
|
|||
"WorkDrive.files.ALL",
|
||||
"ZohoFiles.files.ALL",
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
|
||||
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
|
||||
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
|
||||
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
|
@ -276,8 +275,8 @@ func setupRegion(m configmap.Mapper) error {
|
|||
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
|
||||
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
|
||||
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
|
||||
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -7,15 +7,18 @@ Run with no arguments to test all backends or a supply a list of
|
|||
backends to test.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
all_backends = "backend/all/all.go"
|
||||
|
||||
# compile command which is more or less like the production builds
|
||||
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
# disable CGO as that makes a lot of difference to binary size
|
||||
os.environ["CGO_ENABLED"]="0"
|
||||
|
||||
match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')
|
||||
|
||||
|
@ -43,6 +46,9 @@ def write_all(orig_all, backend):
|
|||
# Comment out line matching backend
|
||||
if match and match.group(1) == backend:
|
||||
line = "// " + line
|
||||
# s3 and pikpak depend on each other
|
||||
if backend == "s3" and "pikpak" in line:
|
||||
line = "// " + line
|
||||
fd.write(line+"\n")
|
||||
|
||||
def compile():
|
||||
|
|
|
@ -121,19 +121,6 @@ func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, err
|
|||
return leaf, dir, errc
|
||||
}
|
||||
|
||||
// lookup a File given a path
|
||||
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
|
||||
node, errc := fsys.lookupNode(path)
|
||||
if errc != 0 {
|
||||
return nil, errc
|
||||
}
|
||||
file, ok := node.(*vfs.File)
|
||||
if !ok {
|
||||
return nil, -fuse.EISDIR
|
||||
}
|
||||
return file, 0
|
||||
}
|
||||
|
||||
// get a node and handle from the path or from the fh if not fhUnset
|
||||
//
|
||||
// handle may be nil
|
||||
|
@ -154,15 +141,9 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
|
|||
Size := uint64(node.Size())
|
||||
Blocks := (Size + 511) / 512
|
||||
modTime := node.ModTime()
|
||||
Mode := node.Mode().Perm()
|
||||
if node.IsDir() {
|
||||
Mode |= fuse.S_IFDIR
|
||||
} else {
|
||||
Mode |= fuse.S_IFREG
|
||||
}
|
||||
//stat.Dev = 1
|
||||
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
|
||||
stat.Mode = uint32(Mode)
|
||||
stat.Mode = getMode(node)
|
||||
stat.Nlink = 1
|
||||
stat.Uid = fsys.VFS.Opt.UID
|
||||
stat.Gid = fsys.VFS.Opt.GID
|
||||
|
@ -509,14 +490,15 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
|
|||
|
||||
// Symlink creates a symbolic link.
|
||||
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
|
||||
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
|
||||
return -fuse.ENOSYS
|
||||
defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
|
||||
return translateError(fsys.VFS.Symlink(target, newpath))
|
||||
}
|
||||
|
||||
// Readlink reads the target of a symbolic link.
|
||||
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
|
||||
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
|
||||
return -fuse.ENOSYS, ""
|
||||
defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath)
|
||||
linkPath, err := fsys.VFS.Readlink(path)
|
||||
return translateError(err), linkPath
|
||||
}
|
||||
|
||||
// Chmod changes the permission bits of a file.
|
||||
|
@ -580,7 +562,7 @@ func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string
|
|||
return errc, ""
|
||||
}
|
||||
normalisedPath = node.Path()
|
||||
if !strings.HasPrefix("/", normalisedPath) {
|
||||
if !strings.HasPrefix(normalisedPath, "/") {
|
||||
normalisedPath = "/" + normalisedPath
|
||||
}
|
||||
return 0, normalisedPath
|
||||
|
@ -615,6 +597,8 @@ func translateError(err error) (errc int) {
|
|||
return -fuse.ENOSYS
|
||||
case vfs.EINVAL:
|
||||
return -fuse.EINVAL
|
||||
case vfs.ELOOP:
|
||||
return -fuse.ELOOP
|
||||
}
|
||||
fs.Errorf(nil, "IO error: %v", err)
|
||||
return -fuse.EIO
|
||||
|
@ -646,6 +630,22 @@ func translateOpenFlags(inFlags int) (outFlags int) {
|
|||
return outFlags
|
||||
}
|
||||
|
||||
// get the Mode from a vfs Node
|
||||
func getMode(node os.FileInfo) uint32 {
|
||||
vfsMode := node.Mode()
|
||||
Mode := vfsMode.Perm()
|
||||
if vfsMode&os.ModeDir != 0 {
|
||||
Mode |= fuse.S_IFDIR
|
||||
} else if vfsMode&os.ModeSymlink != 0 {
|
||||
Mode |= fuse.S_IFLNK
|
||||
} else if vfsMode&os.ModeNamedPipe != 0 {
|
||||
Mode |= fuse.S_IFIFO
|
||||
} else {
|
||||
Mode |= fuse.S_IFREG
|
||||
}
|
||||
return uint32(Mode)
|
||||
}
|
||||
|
||||
// Make sure interfaces are satisfied
|
||||
var (
|
||||
_ fuse.FileSystemInterface = (*FS)(nil)
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
|
@ -35,19 +34,6 @@ func init() {
|
|||
buildinfo.Tags = append(buildinfo.Tags, "cmount")
|
||||
}
|
||||
|
||||
// Find the option string in the current options
|
||||
func findOption(name string, options []string) (found bool) {
|
||||
for _, option := range options {
|
||||
if option == "-o" {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(option, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// mountOptions configures the options from the command line flags
|
||||
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
|
||||
// Options
|
||||
|
@ -93,9 +79,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
|
|||
if VFS.Opt.ReadOnly {
|
||||
options = append(options, "-o", "ro")
|
||||
}
|
||||
if opt.WritebackCache {
|
||||
// FIXME? options = append(options, "-o", WritebackCache())
|
||||
}
|
||||
//if opt.WritebackCache {
|
||||
// FIXME? options = append(options, "-o", WritebackCache())
|
||||
//}
|
||||
if runtime.GOOS == "darwin" {
|
||||
if opt.VolumeName != "" {
|
||||
options = append(options, "-o", "volname="+opt.VolumeName)
|
||||
|
@ -111,9 +97,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
|
|||
for _, option := range opt.ExtraOptions {
|
||||
options = append(options, "-o", option)
|
||||
}
|
||||
for _, option := range opt.ExtraFlags {
|
||||
options = append(options, option)
|
||||
}
|
||||
options = append(options, opt.ExtraFlags...)
|
||||
return options
|
||||
}
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
|
@ -33,7 +34,7 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
|
|||
a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
|
||||
a.Gid = d.VFS().Opt.GID
|
||||
a.Uid = d.VFS().Opt.UID
|
||||
a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
|
||||
a.Mode = d.Mode()
|
||||
modTime := d.ModTime()
|
||||
a.Atime = modTime
|
||||
a.Mtime = modTime
|
||||
|
@ -140,11 +141,13 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
|
|||
// Create makes a new file
|
||||
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
|
||||
defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
|
||||
file, err := d.Dir.Create(req.Name, int(req.Flags))
|
||||
// translate the fuse flags to os flags
|
||||
osFlags := int(req.Flags) | os.O_CREATE
|
||||
file, err := d.Dir.Create(req.Name, osFlags)
|
||||
if err != nil {
|
||||
return nil, nil, translateError(err)
|
||||
}
|
||||
fh, err := file.Open(int(req.Flags) | os.O_CREATE)
|
||||
fh, err := file.Open(osFlags)
|
||||
if err != nil {
|
||||
return nil, nil, translateError(err)
|
||||
}
|
||||
|
@ -200,7 +203,6 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
|
|||
if !ok {
|
||||
return fmt.Errorf("unknown Dir type %T", newDir)
|
||||
}
|
||||
|
||||
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
|
||||
if err != nil {
|
||||
return translateError(err)
|
||||
|
@ -239,6 +241,24 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
|
|||
return nil, syscall.ENOSYS
|
||||
}
|
||||
|
||||
var _ fusefs.NodeSymlinker = (*Dir)(nil)
|
||||
|
||||
// Symlink create a symbolic link.
|
||||
func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
|
||||
defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)
|
||||
|
||||
newName := path.Join(d.Path(), req.NewName)
|
||||
target := req.Target
|
||||
|
||||
n, err := d.VFS().CreateSymlink(target, newName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
node = &File{n.(*vfs.File), d.fsys}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// Check interface satisfied
|
||||
var _ fusefs.NodeMknoder = (*Dir)(nil)
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
|
|||
Blocks := (Size + 511) / 512
|
||||
a.Gid = f.VFS().Opt.GID
|
||||
a.Uid = f.VFS().Opt.UID
|
||||
a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
|
||||
a.Mode = f.File.Mode() &^ os.ModeAppend
|
||||
a.Size = Size
|
||||
a.Atime = modTime
|
||||
a.Mtime = modTime
|
||||
|
@ -129,3 +129,11 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
|
|||
}
|
||||
|
||||
var _ fusefs.NodeRemovexattrer = (*File)(nil)
|
||||
|
||||
var _ fusefs.NodeReadlinker = (*File)(nil)
|
||||
|
||||
// Readlink read symbolic link target.
|
||||
func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
|
||||
defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err)
|
||||
return f.VFS().Readlink(f.Path())
|
||||
}
|
||||
|
|
|
@ -100,6 +100,8 @@ func translateError(err error) error {
|
|||
return syscall.ENOSYS
|
||||
case vfs.EINVAL:
|
||||
return fuse.Errno(syscall.EINVAL)
|
||||
case vfs.ELOOP:
|
||||
return fuse.Errno(syscall.ELOOP)
|
||||
}
|
||||
fs.Errorf(nil, "IO error: %v", err)
|
||||
return err
|
||||
|
|
|
@ -51,9 +51,14 @@ func (f *FS) SetDebug(debug bool) {
|
|||
|
||||
// get the Mode from a vfs Node
|
||||
func getMode(node os.FileInfo) uint32 {
|
||||
Mode := node.Mode().Perm()
|
||||
if node.IsDir() {
|
||||
vfsMode := node.Mode()
|
||||
Mode := vfsMode.Perm()
|
||||
if vfsMode&os.ModeDir != 0 {
|
||||
Mode |= fuse.S_IFDIR
|
||||
} else if vfsMode&os.ModeSymlink != 0 {
|
||||
Mode |= fuse.S_IFLNK
|
||||
} else if vfsMode&os.ModeNamedPipe != 0 {
|
||||
Mode |= fuse.S_IFIFO
|
||||
} else {
|
||||
Mode |= fuse.S_IFREG
|
||||
}
|
||||
|
@ -128,6 +133,8 @@ func translateError(err error) syscall.Errno {
|
|||
return syscall.ENOSYS
|
||||
case vfs.EINVAL:
|
||||
return syscall.EINVAL
|
||||
case vfs.ELOOP:
|
||||
return syscall.ELOOP
|
||||
}
|
||||
fs.Errorf(nil, "IO error: %v", err)
|
||||
return syscall.EIO
|
||||
|
|
|
@@ -227,7 +227,7 @@ type dirStream struct {
// HasNext indicates if there are further entries. HasNext
// might be called on already closed streams.
func (ds *dirStream) HasNext() bool {
	return ds.i < len(ds.nodes)
	return ds.i < len(ds.nodes)+2
}

// Next retrieves the next entry. It is only called if HasNext

@@ -235,7 +235,22 @@ func (ds *dirStream) HasNext() bool {
// indicate I/O errors
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
	// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
	fi := ds.nodes[ds.i]
	if ds.i == 0 {
		ds.i++
		return fuse.DirEntry{
			Mode: fuse.S_IFDIR,
			Name: ".",
			Ino:  0, // FIXME
		}, 0
	} else if ds.i == 1 {
		ds.i++
		return fuse.DirEntry{
			Mode: fuse.S_IFDIR,
			Name: "..",
			Ino:  0, // FIXME
		}, 0
	}
	fi := ds.nodes[ds.i-2]
	de = fuse.DirEntry{
		// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
		// are considered.
@@ -443,3 +458,31 @@ func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn
}

var _ fusefs.NodeListxattrer = (*Node)(nil)

var _ fusefs.NodeReadlinker = (*Node)(nil)

// Readlink read symbolic link target.
func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
	defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
	path := n.node.Path()
	s, serr := n.node.VFS().Readlink(path)
	return []byte(s), translateError(serr)
}

var _ fusefs.NodeSymlinker = (*Node)(nil)

// Symlink create symbolic link.
func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
	defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
	fullPath := path.Join(n.node.Path(), name)
	vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
	if serr != nil {
		return nil, translateError(serr)
	}

	n.fsys.setEntryOut(vfsNode, out)
	newNode := newNode(n.fsys, vfsNode)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})

	return newInode, 0
}
@@ -373,6 +373,9 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {

	m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
	if err != nil {
		if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
			return nil, fmt.Errorf("mounting is not supported when running from snap")
		}
		return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
	}
	m.MountedOn = time.Now()
@@ -142,16 +142,16 @@ func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
	return fi, nil
}

// Symlink is not supported over NFS
// Symlink creates a link pointing to target
func (f *FS) Symlink(target, link string) (err error) {
	defer log.Trace(target, "link=%q", link)("err=%v", &err)
	return os.ErrInvalid
	return f.vfs.Symlink(target, link)
}

// Readlink is not supported
// Readlink reads the contents of link
func (f *FS) Readlink(link string) (result string, err error) {
	defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
	return "", os.ErrInvalid
	return f.vfs.Readlink(link)
}

// Chmod changes the file modes
@@ -65,7 +65,7 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
	if s.proxy == nil {
		return s.vfs
	}
	if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
	if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
		fs.Infof(what, "SSH Permissions Extensions not found")
		return nil
	}

@@ -143,8 +143,13 @@ func (s *server) serve() (err error) {
		authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
		authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
		// If user set the flag away from the default then report an error
		if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
			return err
		if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
			if err != nil {
				return err
			}
			if len(authorizedKeysMap) == 0 {
				return fmt.Errorf("failed to parse authorized keys")
			}
		}
		fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
	}

@@ -349,11 +354,10 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
	authorizedKeysMap = make(map[string]struct{})
	for len(authorizedKeysBytes) > 0 {
		pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
		if err == nil {
			authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
			authorizedKeysBytes = bytes.TrimSpace(rest)
		}
		authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
		authorizedKeysBytes = bytes.TrimSpace(rest)
	}
	return authorizedKeysMap, nil
}
@@ -30,6 +30,7 @@ var (
	maxFileSize = fs.SizeSuffix(100)
	minFileNameLength = 4
	maxFileNameLength = 12
	flat = false
	seed = int64(1)
	zero = false
	sparse = false

@@ -55,6 +56,7 @@ func init() {
	flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
	flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
	flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
	flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")

	test.Command.AddCommand(makefileCmd)
	makefileFlags := makefileCmd.Flags()

@@ -81,6 +83,9 @@ var makefilesCmd = &cobra.Command{
		commonInit()
		outputDirectory := args[0]
		directoriesToCreate = numberOfFiles / averageFilesPerDirectory
		if flat {
			directoriesToCreate = 0
		}
		averageSize := (minFileSize + maxFileSize) / 2
		start := time.Now()
		fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)
@@ -911,3 +911,11 @@ put them back in again.` >}}
* Dimitrios Slamaris <dim0x69@users.noreply.github.com>
* vintagefuture <39503528+vintagefuture@users.noreply.github.com>
* David Seifert <soap@gentoo.org>
* Michael R. Davis <mrdvt92@users.noreply.github.com>
* remygrandin <remy.gr@ndin.fr>
* Ilias Ozgur Can Leonard <iscilyas@gmail.com>
* divinity76 <divinity76@gmail.com>
* Martin Hassack <martin@redmaple.tech>
* Filipe Azevedo <pasnox@gmail.com>
* hayden.pan <hayden.pan@outlook.com>
* Yxxx <45665172+marsjane@users.noreply.github.com>
@@ -1426,6 +1426,22 @@ The options mean

During rmdirs it will not remove root directory, even if it's empty.

### --links / -l

Normally rclone will ignore symlinks or junction points (which behave
like symlinks under Windows).

If you supply this flag then rclone will copy symbolic links from any
supported backend, and store them as text files, with a
`.rclonelink` suffix in the destination.

The text file will contain the target of the symbolic link.

The `--links` / `-l` flag enables this feature for all supported
backends and the VFS. There are individual flags for just enabling it
for the VFS `--vfs-links` and the local backend `--local-links` if
required.
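For illustration, a copy might look something like this (the paths, link target, and listed size here are invented for the example):

```
$ ln -s /tmp/some/target /tmp/a/link
$ rclone copy -l /tmp/a remote:a
$ rclone ls remote:a
       16 link.rclonelink
```

The `link.rclonelink` file on the remote contains the text `/tmp/some/target`.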

### --log-file=FILE ###

Log all of rclone's output to FILE. This is not active by default.
@@ -232,7 +232,7 @@ Use the Google Cloud console to identify a limited role. Some relevant pre-defin

$ gcloud auth application-default print-access-token \
    --impersonate-service-account \
    dev-gcloud-go@${PROJECT_ID}.iam.gserviceaccount.com
    gcs-read-only@${PROJECT_ID}.iam.gserviceaccount.com

ya29.c.c0ASRK0GbAFEewXD [truncated]
@@ -209,13 +209,13 @@ $ rclone -L ls /tmp/a
        6 b/one
```

#### --links, -l
#### --local-links, --links, -l

Normally rclone will ignore symlinks or junction points (which behave
like symlinks under Windows).

If you supply this flag then rclone will copy symbolic links from the local storage,
and store them as text files, with a '.rclonelink' suffix in the remote storage.
and store them as text files, with a `.rclonelink` suffix in the remote storage.

The text file will contain the target of the symbolic link (see example).

@@ -236,7 +236,7 @@ Copying the entire directory with '-l'
$ rclone copy -l /tmp/a/ remote:/tmp/a/
```

The remote files are created with a '.rclonelink' suffix
The remote files are created with a `.rclonelink` suffix

```
$ rclone ls remote:/tmp/a

@@ -274,7 +274,7 @@ $ tree /tmp/b
/tmp/b
├── file1.rclonelink
└── file2.rclonelink
````
```

If you want to copy a single file with `-l` then you must use the `.rclonelink` suffix.
@@ -286,6 +286,10 @@ $ tree /tmp/c
└── file1 -> ./file4
```

Note that `--local-links` just enables this feature for the local
backend. `--links` and `-l` enable the feature for all supported
backends and the VFS.

Note that this flag is incompatible with `-copy-links` / `-L`.

### Restricting filesystems with --one-file-system

@@ -361,9 +365,9 @@ Properties:
- Type: bool
- Default: false

#### --links / -l
#### --local-links

Translate symlinks to/from regular files with a '.rclonelink' extension.
Translate symlinks to/from regular files with a '.rclonelink' extension for the local backend.

Properties:
@@ -161,6 +161,27 @@ You may try to [verify you account](https://docs.microsoft.com/en-us/azure/activ

Note: If you have a special region, you may need a different host in step 4 and 5. Here are [some hints](https://github.com/rclone/rclone/blob/bc23bf11db1c78c6ebbf8ea538fbebf7058b4176/backend/onedrive/onedrive.go#L86).

### Using OAuth Client Credential flow

OAuth Client Credential flow will allow rclone to use permissions
directly associated with the Azure AD Enterprise application, rather
than adopting the context of an Azure AD user account.

This flow can be enabled by following the steps below:

1. Create the Enterprise App registration in the Azure AD portal and obtain a Client ID and Client Secret as described above.
2. Ensure that the application has the appropriate permissions and that they are assigned as *Application Permissions*.
3. Configure the remote, ensuring that *Client ID* and *Client Secret* are entered correctly.
4. In the *Advanced Config* section, enter `true` for `client_credentials` and in the `tenant` section enter the tenant ID.

When choosing the type of connection, pick one that works with the
client credentials flow. In particular the "onedrive" option does not
work. You can use the "sharepoint" option, or if that does not find the
correct drive ID, type it in manually with the "driveid" option.

**NOTE** Assigning permissions directly to the application means that
anyone with the *Client ID* and *Client Secret* can access your
OneDrive files. Take care to safeguard these credentials.
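For illustration only, the resulting section in `rclone.conf` might end up looking something like this (the remote name and placeholder values are invented for the example; the drive details depend on your own setup):

```
[business-onedrive]
type = onedrive
client_id = <your application (client) ID>
client_secret = <your client secret>
tenant = <your tenant ID>
client_credentials = true
```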

### Modification times and hashes
@@ -39,6 +39,10 @@ Pcloud App Client Id - leave blank normally.
client_id>
Pcloud App Client Secret - leave blank normally.
client_secret>
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Remote config
Use web browser to automatically authenticate rclone with remote?
 * Say Y if the machine running rclone has a web browser you can use

@@ -67,6 +71,10 @@ y/e/d> y
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.

Note if you are using remote config with rclone authorize while your pcloud
server is in the EU region, you will need to set the hostname in 'Edit advanced
config', otherwise you might get a token error.
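As a sketch only (assuming the EU API endpoint, which you should confirm against the hostname option's help text), that advanced config answer would end up in `rclone.conf` as something like:

```
hostname = eapi.pcloud.com
```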

Note that rclone runs a webserver on your local machine to collect the
token as returned from pCloud. This only runs from the moment it opens
your browser to the moment you get back the verification code. This
@@ -18,29 +18,31 @@ If you just want to run a remote control then see the [rcd](/commands/rclone_rcd

### --rc

Flag to start the http server listen on remote requests
Flag to start the http server listen on remote requests.
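As a purely illustrative example (the remote name and mount point are hypothetical), the remote control server can be enabled on a long-running command like this:

```
rclone mount remote: /mnt/remote --rc --rc-addr=localhost:5572
```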

### --rc-addr=IP

IPaddress:Port or :Port to bind server to. (default "localhost:5572")
IPaddress:Port or :Port to bind server to. (default "localhost:5572").

### --rc-cert=KEY

SSL PEM key (concatenation of certificate and CA certificate)
SSL PEM key (concatenation of certificate and CA certificate).

### --rc-client-ca=PATH

Client certificate authority to verify clients with
Client certificate authority to verify clients with.

### --rc-htpasswd=PATH

htpasswd file - if not provided no authentication is done
htpasswd file - if not provided no authentication is done.
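If you use the htpasswd option, the file can be created with the standard Apache htpasswd tool, for example (the user name and path here are illustrative):

```
htpasswd -B -c /path/to/htpasswd myuser
```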

### --rc-key=PATH

SSL PEM Private key
TLS PEM private key file.

### --rc-max-header-bytes=VALUE

Maximum size of request header (default 4096)
Maximum size of request header (default 4096).

### --rc-min-tls-version=VALUE
@@ -57,15 +59,15 @@ Password for authentication.

### --rc-realm=VALUE

Realm for authentication (default "rclone")
Realm for authentication (default "rclone").

### --rc-server-read-timeout=DURATION

Timeout for server reading data (default 1h0m0s)
Timeout for server reading data (default 1h0m0s).

### --rc-server-write-timeout=DURATION

Timeout for server writing data (default 1h0m0s)
Timeout for server writing data (default 1h0m0s).

### --rc-serve
@@ -322,7 +322,7 @@ func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo)
	// Limit number of groups kept in memory.
	if len(sg.order) >= ci.MaxStatsGroups {
		group := sg.order[0]
		fs.LogPrintf(fs.LogLevelDebug, nil, "Max number of stats groups reached removing %s", group)
		fs.Debugf(nil, "Max number of stats groups reached removing %s", group)
		delete(sg.m, group)
		r := (len(sg.order) - ci.MaxStatsGroups) + 1
		sg.order = sg.order[r:]
@@ -105,6 +105,12 @@ var ConfigOptionsInfo = Options{{
	Default: false,
	Help:    "Enable interactive mode",
	Groups:  "Config,Important",
}, {
	Name:     "links",
	Help:     "Translate symlinks to/from regular files with a '" + LinkSuffix + "' extension.",
	Default:  false,
	ShortOpt: "l",
	Groups:   "Copy",
}, {
	Name:    "contimeout",
	Default: 60 * time.Second,

@@ -537,6 +543,7 @@ type ConfigInfo struct {
	UseJSONLog bool `config:"use_json_log"`
	DryRun bool `config:"dry_run"`
	Interactive bool `config:"interactive"`
	Links bool `config:"links"`
	CheckSum bool `config:"checksum"`
	SizeOnly bool `config:"size_only"`
	IgnoreTimes bool `config:"ignore_times"`
@@ -46,6 +46,9 @@ const (
	// ConfigTokenURL is the config key used to store the token server endpoint
	ConfigTokenURL = "token_url"

	// ConfigClientCredentials - use OAUTH2 client credentials
	ConfigClientCredentials = "client_credentials"

	// ConfigEncoding is the config key to change the encoding for a backend
	ConfigEncoding = "encoding"

fs/fs.go
@@ -16,6 +16,8 @@ const (
	ModTimeNotSupported = 100 * 365 * 24 * time.Hour
	// MaxLevel is a sentinel representing an infinite depth for listings
	MaxLevel = math.MaxInt32
	// The suffix added to a translated symbolic link
	LinkSuffix = ".rclonelink"
)

// Globals
@@ -74,12 +74,6 @@ func (job *Job) finish(out rc.Params, err error) {
	running.kickExpire() // make sure this job gets expired
}

func (job *Job) addListener(fn *func()) {
	job.mu.Lock()
	defer job.mu.Unlock()
	job.listeners = append(job.listeners, fn)
}

func (job *Job) removeListener(fn *func()) {
	job.mu.Lock()
	defer job.mu.Unlock()
@@ -94,10 +88,12 @@ func (job *Job) removeListener(fn *func()) {
// OnFinish adds listener to job that will be triggered when job is finished.
// It returns a function to cancel listening.
func (job *Job) OnFinish(fn func()) func() {
	job.mu.Lock()
	defer job.mu.Unlock()
	if job.Finished {
		fn()
		go fn()
	} else {
		job.addListener(&fn)
		job.listeners = append(job.listeners, &fn)
	}
	return func() { job.removeListener(&fn) }
}
@@ -554,3 +554,52 @@ func TestOnFinishAlreadyFinished(t *testing.T) {
		t.Fatal("Timeout waiting for OnFinish to fire")
	}
}

func TestOnFinishDataRace(t *testing.T) {
	jobID.Store(0)
	job, _, err := NewJob(context.Background(), ctxFn, rc.Params{"_async": true})
	assert.NoError(t, err)
	var expect, got uint64
	finished := make(chan struct{})
	stop, stopped := make(chan struct{}), make(chan struct{})
	go func() {
	Loop:
		for {
			select {
			case <-stop:
				break Loop
			default:
				_, err := OnFinish(job.ID, func() {
					finished <- struct{}{}
				})
				assert.NoError(t, err)
				expect += 1
			}
		}
		close(stopped)
	}()

	time.Sleep(10 * time.Millisecond)
	job.Stop()

	// Wait for the first OnFinish to fire
	<-finished
	got += 1

	// Stop the OnFinish producer
	close(stop)
	<-stopped

	timeout := time.After(5 * time.Second)
	for {
		if got == expect {
			break
		}
		select {
		case <-finished:
			got += 1
		case <-timeout:
			t.Fatal("Timeout waiting for all OnFinish calls to fire")
		}
	}
}
81 go.mod
@ -6,10 +6,10 @@ require (
|
|||
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.78
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e
|
||||
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
|
||||
|
@ -17,12 +17,12 @@ require (
|
|||
github.com/anacrolix/dms v1.7.1
|
||||
github.com/anacrolix/log v0.16.0
|
||||
github.com/atotto/clipboard v0.1.4
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.4
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.3
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.44
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.37
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3
|
||||
github.com/aws/smithy-go v1.22.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0
|
||||
github.com/aws/smithy-go v1.22.1
|
||||
github.com/buengese/sgzip v0.1.1
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20231124195312-f3ec8ae2c891
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0
|
||||
|
@ -30,13 +30,13 @@ require (
|
|||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/gabriel-vasile/mimetype v1.4.6
|
||||
github.com/gabriel-vasile/mimetype v1.4.7
|
||||
github.com/gdamore/tcell/v2 v2.7.4
|
||||
github.com/go-chi/chi/v5 v5.1.0
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
|
||||
github.com/go-git/go-billy/v5 v5.6.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hanwen/go-fuse/v2 v2.6.3
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0
|
||||
github.com/henrybear327/go-proton-api v1.0.0
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4
|
||||
|
@ -52,7 +52,7 @@ require (
|
|||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/ncw/swift/v2 v2.0.3
|
||||
github.com/oracle/oci-go-sdk/v65 v65.78.0
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/sftp v1.13.7
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||
|
@ -68,8 +68,8 @@ require (
|
|||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002
|
||||
github.com/unknwon/goconfig v1.0.0
|
||||
github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9
|
||||
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
|
||||
|
@ -78,22 +78,22 @@ require (
|
|||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0
|
||||
go.etcd.io/bbolt v1.3.10
|
||||
goftp.io/server/v2 v2.0.1
|
||||
golang.org/x/crypto v0.29.0
|
||||
golang.org/x/net v0.31.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/net v0.32.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
golang.org/x/sync v0.9.0
|
||||
golang.org/x/sys v0.27.0
|
||||
golang.org/x/text v0.20.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/time v0.8.0
|
||||
google.golang.org/api v0.205.0
|
||||
google.golang.org/api v0.211.0
|
||||
gopkg.in/validator.v2 v2.0.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
storj.io/uplink v1.13.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.10.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
|
||||
cloud.google.com/go/auth v0.12.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.5.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
|
@ -107,19 +107,19 @@ require (
|
|||
github.com/anacrolix/generics v0.0.1 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bradenaw/juniper v0.15.2 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
|
@ -149,7 +149,7 @@ require (
|
|||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
|
@ -199,7 +199,6 @@ require (
|
|||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.29.0 // indirect
|
||||
|
@ -207,10 +206,9 @@ require (
|
|||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583 // indirect
|
||||
google.golang.org/grpc v1.67.1 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
google.golang.org/protobuf v1.35.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
moul.io/http2curl/v2 v2.3.0 // indirect
|
||||
storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
|
||||
|
@ -222,10 +220,9 @@ require (
|
|||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.2
|
||||
github.com/ProtonMail/go-crypto v1.1.3
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/pkg/xattr v0.4.10
|
||||
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6
|
||||
golang.org/x/term v0.26.0
|
||||
golang.org/x/term v0.27.0
|
||||
)
|
||||
|
|
173 go.sum
@ -15,10 +15,10 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
|||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/auth v0.10.1 h1:TnK46qldSfHWt2a0b/hciaiVJsmDXWy9FqyUan0uYiI=
|
||||
cloud.google.com/go/auth v0.10.1/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/auth v0.12.1 h1:n2Bj25BUMM0nvE9D2XLTiImanwZhO3DkfWSYS/SAJP4=
|
||||
cloud.google.com/go/auth v0.12.1/go.mod h1:BFMu+TNpF3DmvfBO9ClqTR/SiqVIm7LukKF9mbendF4=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -49,10 +49,10 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xP
|
|||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 h1:cf+OIKbkmMHBaC3u78AXomweqM0oxQSgBXRZf3WH4yM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQKJxSMNiGJcq4QuUQkOynyD93gLw6MDF7ek=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.1 h1:a1U6j4GPI18JQCqgz7/DcqXA1vzvGBugm14AXZfU0gs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.3.1/go.mod h1:tZyRNcHi2/yo+ugYHTUuOrHiboKilaizLnRL5aZTe6A=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0 h1:mJVYrRyo7/ISs3MLMHphqssqbS1vLJ3uiwo1+fY8OUQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0/go.mod h1:QXy84HaR0FHLPWaGQDBrZZbdCPTshwGl3gQ64uR/Zrc=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
|
@ -61,8 +61,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx
|
|||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.78 h1:mqZLLi/ditE8osCggrXhVSOG67xNdTLG0SdxkSB0EBI=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.78/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97 h1:c+mQoiES/21JrHDAxJLCYICJO+bu8Clv0ZDNZe7Ndyk=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
|
@ -77,8 +77,8 @@ github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcM
|
|||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v1.1.2 h1:A7JbD57ThNqh7XjmHE+PXpQ3Dqt3BrSAC0AL0Go3KS0=
|
||||
github.com/ProtonMail/go-crypto v1.1.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk=
|
||||
github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
|
||||
|
@ -108,44 +108,44 @@ github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er
|
|||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.4 h1:S13INUiTxgrPueTmrm5DZ+MiAo99zYzHEFh1UNkOxNE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.4/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.3 h1:kL5uAptPcPKaJ4q0sDUjUIdueO18Q7JDzl64GpVwdOM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.3/go.mod h1:SPEn1KA8YbgQnwiJ/OISU4fz7+F6Fe309Jf0QTsRCl4=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.44 h1:qqfs5kulLUHUEXlHEZXLJkgGoF3kkUeFUTVA585cFpU=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.44/go.mod h1:0Lm2YJ8etJdEdw23s+q/9wTpOeo2HhNE97XcRa7T8MA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 h1:woXadbf0c7enQ2UGCi8gW/WuKmE0xIzxBF/eD94jMKQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19/go.mod h1:zminj5ucw7w0r65bP6nhyOd3xL6veAUMc3ElGMoLVb4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.37 h1:jHKR76E81sZvz1+x1vYYrHMxphG5LFBJPhSqEr4CLlE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.37/go.mod h1:iMkyPkmoJWQKzSOtaX+8oEJxAuqr7s8laxcqGDSHeII=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 h1:A2w6m6Tmr+BNXjDsr7M90zkWjsu4JXHwrzPg235STs4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23/go.mod h1:35EVp9wyeANdujZruvHiQUAo9E3vbhnIO1mTCAxMlY0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 h1:pgYW9FCabt2M25MoHYCfMrVY2ghiiBKYWUVXfwZs+sU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23/go.mod h1:c48kLgzO19wAu3CPkDWC28JbaJ+hfQlsdl7I2+oqIbk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 h1:iLdpkYZ4cXIQMO7ud+cqMWR1xK5ESbt1rvN77tRi1BY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43/go.mod h1:OgbsKPAswXDd5kxnR4vZov69p3oYjbvUyIRBAAV0y9o=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23 h1:1SZBDiRzzs3sNhOMVApyWPduWYGAX0imGy06XiBnCAM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23/go.mod h1:i9TkxgbZmHVh2S0La6CAXtnyFhlCX/pJ0JsOvBAS6Mk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4 h1:aaPpoG15S2qHkWm4KlEyF01zovK1nW4BBbyXuHNSE90=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4/go.mod h1:eD9gS2EARTKgGr/W5xwgY/ik9z/zqpW+m/xOQbVxrMk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 h1:tHxQi/XHPK0ctd/wdOw0t7Xrc2OxcRCnVzv8lwWPu0c=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 h1:E5ZAVOmI2apR8ADb72Q63KqwwwdW1XcMeXIlrZ1Psjg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4/go.mod h1:wezzqVUOVVdk+2Z/JzQT4NxAU0NbhRe5W8pIE72jsWI=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3 h1:neNOYJl72bHrz9ikAEED4VqWyND/Po0DnEx64RW6YM4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3/go.mod h1:TMhLIyRIyoGVlaEMAt+ITMbwskSTpcGsCPDq91/ihY0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 h1:HJwZwRt2Z2Tdec+m+fPjvdmkq2s9Ra+VR0hjF7V2o40=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.5/go.mod h1:wrMCEwjFPms+V86TCQQeOxQF/If4vT44FGIOFiMC2ck=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 h1:zcx9LiGWZ6i6pjdcoE9oXAB6mUdeyC36Ia/QEiIvYdg=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4/go.mod h1:Tp/ly1cTjRLGBBmNccFumbZ8oqpZlpdhFf80SrRh4is=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 h1:yDxvkz3/uOKfxnv8YhzOi9m+2OGIxF+on3KOISbK5IU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8=
|
||||
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
|
||||
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 h1:r67ps7oHCYnflpgDy2LZU0MAQtQbYIOqNNnqGO6xQkE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25/go.mod h1:GrGY+Q4fIokYLtjCVB/aFfCVL6hhGUFl8inD18fDalE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 h1:HCpPsWqmYQieU7SS6E9HXfdAMSud0pteVXieJmcpIRI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6/go.mod h1:ngUiVRCco++u+soRRVBIvBZxSMMvOVMXA4PJ36JLfSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlmjBiCr/le3wzhA37O8QTC5/Ab8+EXk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 h1:nyuzXooUNJexRT0Oy0UQY6AhOzxPxhtt4DcBIHyCnmw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8=
|
||||
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
|
||||
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=
|
||||
|
@ -231,8 +231,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
|||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
|
||||
github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
|
||||
github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
|
||||
github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
|
||||
github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
|
||||
|
@ -289,8 +289,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
|
|||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
@ -311,7 +309,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -321,7 +318,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
|
@ -338,15 +334,14 @@ github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwg
|
|||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
|
||||
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
|
||||
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
|
@ -354,8 +349,8 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
|
|||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/hanwen/go-fuse/v2 v2.6.3 h1:tDcEkLRx93lXu4XyN1/j8Z74VWvhHDl6qU1kNnvFUqI=
|
||||
github.com/hanwen/go-fuse/v2 v2.6.3/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2 h1:SbJP1sUP+n1UF8NXBA14BuojmTez+mDgOk0bC057HQw=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
|
@ -483,8 +478,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
|
|||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.78.0 h1:iM7lFFA7cJkUD4tmrlsAHWgL3HuTuF9mdvTAliMkcFA=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.78.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0 h1:Rr7QLMozd2DfDBKo6AB3DzLYQxAwuOG118+K5AAD5E8=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
|
||||
github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8Kw=
|
||||
github.com/panjf2000/ants/v2 v2.9.1/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
|
@ -598,10 +593,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
|||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002 h1:jevGbwKzMmHLgHAaDaMJLQX3jpXUWjUvnsrPeMgkM7o=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002/go.mod h1:0Mv/XWQoRWF7d7jkc4DufsAJQg8xyZ5NtCkY59wECQY=
|
||||
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
|
||||
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
|
||||
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
|
||||
|
@ -650,8 +645,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
|
||||
|
@ -678,14 +671,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
|
||||
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -755,12 +747,10 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
|
@ -768,8 +758,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
|||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
|
||||
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
|
||||
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
|
||||
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -790,8 +780,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -848,11 +838,10 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
|
||||
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
|
@ -861,8 +850,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
|||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
|
||||
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -876,8 +865,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
|||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -954,8 +943,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
|||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.205.0 h1:LFaxkAIpDb/GsrWV20dMMo5MR0h8UARTbn24LmD+0Pg=
|
||||
google.golang.org/api v0.205.0/go.mod h1:NrK1EMqO8Xk6l6QwRAmrXXg2v6dzukhlOyvkYtnvUuc=
|
||||
google.golang.org/api v0.211.0 h1:IUpLjq09jxBSV1lACO33CGY3jsRcbctfGzhj+ZSE/Bg=
|
||||
google.golang.org/api v0.211.0/go.mod h1:XOloB4MXFH4UTlQSGuNUxw0UT74qdENK8d6JNsXKLi0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -991,10 +980,11 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583 h1:IfdSdTcLFy4lqUQrQJLkLt1PB+AsqVz6lwkWPzWEz10=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@ -1007,7 +997,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
|
|||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
|
||||
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
|
@ -1020,8 +1009,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
|
|
|
@ -66,20 +66,21 @@ https. You will need to supply the ` + "`--{{ .Prefix }}cert` and `--{{ .Prefix
|
|||
If you wish to do client side certificate validation then you will need to
|
||||
supply ` + "`--{{ .Prefix }}client-ca`" + ` also.
|
||||
|
||||
` + "`--{{ .Prefix }}cert`" + ` should be a either a PEM encoded certificate or a concatenation
|
||||
of that with the CA certificate. ` + "`--k{{ .Prefix }}ey`" + ` should be the PEM encoded
|
||||
private key and ` + "`--{{ .Prefix }}client-ca`" + ` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
` + "`--{{ .Prefix }}cert`" + ` must be set to the path of a file containing
|
||||
either a PEM encoded certificate, or a concatenation of that with the CA
|
||||
certificate. ` + "`--{{ .Prefix }}key`" + ` must be set to the path of a file
|
||||
with the PEM encoded private key. ` + "If setting `--{{ .Prefix }}client-ca`" + `,
|
||||
it should be set to the path of a file with PEM encoded client certificate
|
||||
authority certificates.
|
||||
|
||||
` + "`--{{ .Prefix }}min-tls-version`" + ` is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
|
||||
|
||||
### Socket activation
|
||||
|
||||
Instead of the listening addresses specified above, rclone will listen to all
|
||||
FDs passed by the service manager, if any (and ignore any arguments passed by ` +
|
||||
"--{{ .Prefix }}addr`" + `).
|
||||
FDs passed by the service manager, if any (and ignore any arguments passed
|
||||
by ` + "`--{{ .Prefix }}addr`" + `).
|
||||
|
||||
This allows rclone to be a socket-activated service.
|
||||
It can be configured with .socket and .service unit files as described in
|
||||
|
@ -162,11 +163,11 @@ type Config struct {
|
|||
ServerReadTimeout time.Duration `config:"server_read_timeout"` // Timeout for server reading data
|
||||
ServerWriteTimeout time.Duration `config:"server_write_timeout"` // Timeout for server writing data
|
||||
MaxHeaderBytes int `config:"max_header_bytes"` // Maximum size of request header
|
||||
TLSCert string `config:"cert"` // Path to TLS PEM key (concatenation of certificate and CA certificate)
|
||||
TLSKey string `config:"key"` // Path to TLS PEM Private key
|
||||
TLSCertBody []byte `config:"-"` // TLS PEM key (concatenation of certificate and CA certificate) body, ignores TLSCert
|
||||
TLSKeyBody []byte `config:"-"` // TLS PEM Private key body, ignores TLSKey
|
||||
ClientCA string `config:"client_ca"` // Client certificate authority to verify clients with
|
||||
TLSCert string `config:"cert"` // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
|
||||
TLSKey string `config:"key"` // Path to TLS PEM private key file
|
||||
TLSCertBody []byte `config:"-"` // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
|
||||
TLSKeyBody []byte `config:"-"` // TLS PEM private key body, ignores TLSKey
|
||||
ClientCA string `config:"client_ca"` // Path to TLS PEM CA file with certificate authorities to verify clients with
|
||||
MinTLSVersion string `config:"min_tls_version"` // MinTLSVersion contains the minimum TLS version that is acceptable.
|
||||
AllowOrigin string `config:"allow_origin"` // AllowOrigin sets the Access-Control-Allow-Origin header
|
||||
}
|
||||
|
@ -177,9 +178,9 @@ func (cfg *Config) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
|
|||
flags.DurationVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", cfg.ServerReadTimeout, "Timeout for server reading data", prefix)
|
||||
flags.DurationVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", cfg.ServerWriteTimeout, "Timeout for server writing data", prefix)
|
||||
flags.IntVarP(flagSet, &cfg.MaxHeaderBytes, prefix+"max-header-bytes", "", cfg.MaxHeaderBytes, "Maximum size of request header", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.TLSCert, prefix+"cert", "", cfg.TLSCert, "TLS PEM key (concatenation of certificate and CA certificate)", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.TLSKey, prefix+"key", "", cfg.TLSKey, "TLS PEM Private key", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.ClientCA, prefix+"client-ca", "", cfg.ClientCA, "Client certificate authority to verify clients with", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.TLSCert, prefix+"cert", "", cfg.TLSCert, "Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.TLSKey, prefix+"key", "", cfg.TLSKey, "Path to TLS PEM private key file", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.ClientCA, prefix+"client-ca", "", cfg.ClientCA, "Path to TLS PEM CA file with certificate authorities to verify clients with", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.BaseURL, prefix+"baseurl", "", cfg.BaseURL, "Prefix for URLs - leave blank for root", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.MinTLSVersion, prefix+"min-tls-version", "", cfg.MinTLSVersion, "Minimum TLS version that is acceptable", prefix)
|
||||
flags.StringVarP(flagSet, &cfg.AllowOrigin, prefix+"allow-origin", "", cfg.AllowOrigin, "Origin which cross-domain request (CORS) can be executed from", prefix)
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -23,6 +24,7 @@ import (
|
|||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/skratchdot/open-golang/open"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -85,6 +87,49 @@ All done. Please go back to rclone.
|
|||
// should work for most uses, but may be overridden.
|
||||
var OpenURL = open.Start
|
||||
|
||||
// Config - structure that we will use to store the OAuth configuration
|
||||
// settings. This is based on the union of the configuration structures for the two
|
||||
// OAuth modules that we are using (oauth2 and oauth2/clientcredentials), along with a
|
||||
// flag indicating if we are going to use the client credential flow
|
||||
type Config struct {
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
TokenURL string
|
||||
AuthURL string
|
||||
Scopes []string
|
||||
EndpointParams url.Values
|
||||
RedirectURL string
|
||||
ClientCredentialFlow bool
|
||||
AuthStyle oauth2.AuthStyle
|
||||
}
|
||||
|
||||
// MakeOauth2Config makes an oauth2.Config from our config
|
||||
func (conf *Config) MakeOauth2Config() *oauth2.Config {
|
||||
return &oauth2.Config{
|
||||
ClientID: conf.ClientID,
|
||||
ClientSecret: conf.ClientSecret,
|
||||
RedirectURL: RedirectLocalhostURL,
|
||||
Scopes: conf.Scopes,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: conf.AuthURL,
|
||||
TokenURL: conf.TokenURL,
|
||||
AuthStyle: conf.AuthStyle,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// MakeClientCredentialsConfig makes a clientcredentials.Config from our config
|
||||
func (conf *Config) MakeClientCredentialsConfig() *clientcredentials.Config {
|
||||
return &clientcredentials.Config{
|
||||
ClientID: conf.ClientID,
|
||||
ClientSecret: conf.ClientSecret,
|
||||
Scopes: conf.Scopes,
|
||||
TokenURL: conf.TokenURL,
|
||||
AuthStyle: conf.AuthStyle,
|
||||
// EndpointParams url.Values
|
||||
}
|
||||
}
|
||||
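A minimal sketch of how the unified Config converts into the two underlying flows. This is not taken from the rclone source: the import path is the one used elsewhere in the repo, and the endpoints, scopes and credentials are placeholders. In rclone itself the flow is selected by the ClientCredentialFlow field; here both conversions are shown side by side.

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

func main() {
	// Placeholder endpoints and credentials - illustrative only.
	conf := &oauthutil.Config{
		ClientID:     "example-client-id",
		ClientSecret: "example-client-secret",
		AuthURL:      "https://example.com/oauth2/authorize",
		TokenURL:     "https://example.com/oauth2/token",
		Scopes:       []string{"read", "write"},
	}

	// Three-legged flow: convert to *oauth2.Config and build the consent URL.
	authURL := conf.MakeOauth2Config().AuthCodeURL("state", oauth2.AccessTypeOffline)
	fmt.Println("visit:", authURL)

	// Client credentials flow: convert to *clientcredentials.Config and fetch
	// a token directly, with no browser interaction.
	token, err := conf.MakeClientCredentialsConfig().Token(context.Background())
	fmt.Println(token, err)
}
```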
|
||||
// SharedOptions are shared between backends that utilize an OAuth flow
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
|
@ -107,6 +152,11 @@ var SharedOptions = []fs.Option{{
|
|||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url.\n\nLeave blank to use the provider defaults.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigClientCredentials,
|
||||
Default: false,
|
||||
Help: "Use client credentials OAuth flow.\n\nThis will use the OAUTH2 client Credentials Flow as described in RFC 6749.",
|
||||
Advanced: true,
|
||||
}}
|
||||
|
||||
// oldToken contains an end-user's tokens.
|
||||
|
@ -178,7 +228,7 @@ type TokenSource struct {
|
|||
m configmap.Mapper
|
||||
tokenSource oauth2.TokenSource
|
||||
token *oauth2.Token
|
||||
config *oauth2.Config
|
||||
config *Config
|
||||
ctx context.Context
|
||||
expiryTimer *time.Timer // signals whenever the token expires
|
||||
}
|
||||
|
@ -264,6 +314,11 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
|||
)
|
||||
const maxTries = 5
|
||||
|
||||
// If we have a cached valid token, use that
|
||||
if ts.token.Valid() {
|
||||
return ts.token, nil
|
||||
}
|
||||
|
||||
// Try getting the token a few times
|
||||
for i := 1; i <= maxTries; i++ {
|
||||
// Try reading the token from the config file in case it has
|
||||
|
@ -271,7 +326,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
|||
if !ts.token.Valid() {
|
||||
if ts.reReadToken() {
|
||||
changed = true
|
||||
} else if ts.token.RefreshToken == "" {
|
||||
} else if !ts.config.ClientCredentialFlow && ts.token.RefreshToken == "" {
|
||||
return nil, fserrors.FatalError(
|
||||
fmt.Errorf("token expired and there's no refresh token - manually refresh with \"rclone config reconnect %s:\"", ts.name),
|
||||
)
|
||||
|
@ -280,7 +335,11 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
|||
|
||||
// Make a new token source if required
|
||||
if ts.tokenSource == nil {
|
||||
ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
|
||||
if ts.config.ClientCredentialFlow {
|
||||
ts.tokenSource = ts.config.MakeClientCredentialsConfig().TokenSource(ts.ctx)
|
||||
} else {
|
||||
ts.tokenSource = ts.config.MakeOauth2Config().TokenSource(ts.ctx, ts.token)
|
||||
}
|
||||
}
|
||||
|
||||
token, err = ts.tokenSource.Token()
|
||||
|
@ -297,7 +356,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't fetch token: %w", err)
|
||||
}
|
||||
changed = changed || token.AccessToken != ts.token.AccessToken || token.RefreshToken != ts.token.RefreshToken || token.Expiry != ts.token.Expiry
|
||||
changed = changed || ts.token == nil || token.AccessToken != ts.token.AccessToken || token.RefreshToken != ts.token.RefreshToken || token.Expiry != ts.token.Expiry
|
||||
ts.token = token
|
||||
if changed {
|
||||
// Bump on the expiry timer if it is set
|
||||
|
@ -370,12 +429,12 @@ func Context(ctx context.Context, client *http.Client) context.Context {
|
|||
return context.WithValue(ctx, oauth2.HTTPClient, client)
|
||||
}
|
||||
|
||||
// overrideCredentials sets the ClientID and ClientSecret from the
|
||||
// OverrideCredentials sets the ClientID and ClientSecret from the
|
||||
// config file if they are not blank.
|
||||
// If any value is overridden, true is returned.
|
||||
// The origConfig is copied and left unmodified.
|
||||
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
|
||||
newConfig = new(oauth2.Config)
|
||||
func OverrideCredentials(name string, m configmap.Mapper, origConfig *Config) (newConfig *Config, changed bool) {
|
||||
newConfig = new(Config)
|
||||
*newConfig = *origConfig
|
||||
changed = false
|
||||
ClientID, ok := m.Get(config.ConfigClientID)
|
||||
|
@ -393,12 +452,22 @@ func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Con
|
|||
}
|
||||
AuthURL, ok := m.Get(config.ConfigAuthURL)
|
||||
if ok && AuthURL != "" {
|
||||
newConfig.Endpoint.AuthURL = AuthURL
|
||||
newConfig.AuthURL = AuthURL
|
||||
changed = true
|
||||
}
|
||||
TokenURL, ok := m.Get(config.ConfigTokenURL)
|
||||
if ok && TokenURL != "" {
|
||||
newConfig.Endpoint.TokenURL = TokenURL
|
||||
newConfig.TokenURL = TokenURL
|
||||
changed = true
|
||||
}
|
||||
ClientCredentialStr, ok := m.Get(config.ConfigClientCredentials)
|
||||
if ok && ClientCredentialStr != "" {
|
||||
ClientCredential, err := strconv.ParseBool(ClientCredentialStr)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Invalid setting for %q: %v", config.ConfigClientCredentials, err)
|
||||
} else {
|
||||
newConfig.ClientCredentialFlow = ClientCredential
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
return newConfig, changed
|
||||
|
@ -408,8 +477,8 @@ func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Con
|
|||
// configures a Client with it. It returns the client and a
|
||||
// TokenSource which Invalidate may need to be called on. It uses the
|
||||
// httpClient passed in as the base client.
|
||||
func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
|
||||
config, _ = overrideCredentials(name, m, config)
|
||||
func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mapper, config *Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
|
||||
config, _ = OverrideCredentials(name, m, config)
|
||||
token, err := GetToken(name, m)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -428,12 +497,39 @@ func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mappe
|
|||
ctx: ctx,
|
||||
}
|
||||
return oauth2.NewClient(ctx, ts), ts, nil
|
||||
}
|
||||
|
||||
// NewClientCredentialsClient creates a new OAuth module using the
|
||||
// ClientCredential flow
|
||||
func NewClientCredentialsClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
|
||||
oauthConfig, _ = OverrideCredentials(name, m, oauthConfig)
|
||||
token, _ := GetToken(name, m)
|
||||
// If the token doesn't exist then we will fetch one in the next step as we don't need a refresh token
|
||||
|
||||
// Set our own http client in the context
|
||||
ctx = Context(ctx, baseClient)
|
||||
|
||||
// Wrap the TokenSource in our TokenSource which saves changed
|
||||
// tokens in the config file
|
||||
ts := &TokenSource{
|
||||
name: name,
|
||||
m: m,
|
||||
token: token,
|
||||
config: oauthConfig,
|
||||
ctx: ctx,
|
||||
}
|
||||
return oauth2.NewClient(ctx, ts), ts, nil
|
||||
}
|
||||
|
||||
// NewClient gets a token from the config file and configures a Client
|
||||
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
|
||||
func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
|
||||
// with it. It returns the client and a TokenSource which Invalidate
|
||||
// may need to be called on
|
||||
func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config) (*http.Client, *TokenSource, error) {
|
||||
// Check whether we are using the client credentials flow
|
||||
if oauthConfig.ClientCredentialFlow {
|
||||
|
||||
return NewClientCredentialsClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
|
||||
}
|
||||
return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
|
||||
}
|
||||
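From a backend's perspective the entry point stays a single call: hand the package-level *oauthutil.Config to NewClient and the right flow is selected automatically. A hedged call-site sketch follows; the function name and error wording are illustrative and it assumes the usual imports (context, fmt, net/http, configmap, oauthutil), not part of this change.

```go
// newOAuthClient builds an authenticated *http.Client for a remote.
// oauthConfig is the backend's package-level *oauthutil.Config.
func newOAuthClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauthutil.Config) (*http.Client, *oauthutil.TokenSource, error) {
	client, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to configure OAuth client: %w", err)
	}
	return client, ts, nil
}
```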
|
||||
|
@ -460,11 +556,11 @@ func (ar *AuthResult) Error() string {
|
|||
}
|
||||
|
||||
// CheckAuthFn is called when a good Auth has been received
|
||||
type CheckAuthFn func(*oauth2.Config, *AuthResult) error
|
||||
type CheckAuthFn func(*Config, *AuthResult) error
|
||||
|
||||
// Options for the oauth config
|
||||
type Options struct {
|
||||
OAuth2Config *oauth2.Config // Basic config for oauth2
|
||||
OAuth2Config *Config // Basic config for oauth2
|
||||
NoOffline bool // If set then "access_type=offline" parameter is not passed
|
||||
CheckAuth CheckAuthFn // When the AuthResult is known the checkAuth function is called if set
|
||||
OAuth2Opts []oauth2.AuthCodeOption // extra oauth2 options
|
||||
|
@ -532,6 +628,15 @@ func ConfigOAuth(ctx context.Context, name string, m configmap.Mapper, ri *fs.Re
|
|||
if in.Result == "false" {
|
||||
return fs.ConfigGoto(newState("*oauth-done"))
|
||||
}
|
||||
opt, err := getOAuth()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oauthConfig, _ := OverrideCredentials(name, m, opt.OAuth2Config)
|
||||
if oauthConfig.ClientCredentialFlow {
|
||||
// If using client credential flow, skip straight to getting the token since we don't need a browser
|
||||
return fs.ConfigGoto(newState("*oauth-do"))
|
||||
}
|
||||
return fs.ConfigConfirm(newState("*oauth-islocal"), true, "config_is_local", "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n")
|
||||
case "*oauth-islocal":
|
||||
if in.Result == "true" {
|
||||
|
@ -626,20 +731,27 @@ version recommended):
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oauthConfig, changed := overrideCredentials(name, m, opt.OAuth2Config)
|
||||
oauthConfig, changed := OverrideCredentials(name, m, opt.OAuth2Config)
|
||||
if changed {
|
||||
fs.Logf(nil, "Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
|
||||
}
|
||||
if code == "" {
|
||||
oauthConfig = fixRedirect(oauthConfig)
|
||||
code, err = configSetup(ctx, ri.Name, name, m, oauthConfig, opt)
|
||||
if oauthConfig.ClientCredentialFlow {
|
||||
err = clientCredentialsFlowGetToken(ctx, name, m, oauthConfig, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config failed to refresh token: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if code == "" {
|
||||
oauthConfig = fixRedirect(oauthConfig)
|
||||
code, err = configSetup(ctx, ri.Name, name, m, oauthConfig, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config failed to refresh token: %w", err)
|
||||
}
|
||||
}
|
||||
err = configExchange(ctx, name, m, oauthConfig, code)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = configExchange(ctx, name, m, oauthConfig, code)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.ConfigGoto(newState("*oauth-done"))
|
||||
case "*oauth-done":
|
||||
|
@ -656,13 +768,13 @@ func init() {
|
|||
}
|
||||
|
||||
// Return true if can run without a webserver and just entering a code
|
||||
func noWebserverNeeded(oauthConfig *oauth2.Config) bool {
|
||||
func noWebserverNeeded(oauthConfig *Config) bool {
|
||||
return oauthConfig.RedirectURL == TitleBarRedirectURL
|
||||
}
|
||||
|
||||
// get the URL we need to send the user to
|
||||
func getAuthURL(name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt *Options) (authURL string, state string, err error) {
|
||||
oauthConfig, _ = overrideCredentials(name, m, oauthConfig)
|
||||
func getAuthURL(name string, m configmap.Mapper, oauthConfig *Config, opt *Options) (authURL string, state string, err error) {
|
||||
oauthConfig, _ = OverrideCredentials(name, m, oauthConfig)
|
||||
|
||||
// Make random state
|
||||
state, err = random.Password(128)
|
||||
|
@ -670,18 +782,21 @@ func getAuthURL(name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt
|
|||
return "", "", err
|
||||
}
|
||||
|
||||
// Create the configuration required for the OAuth flow
|
||||
oauth2Conf := oauthConfig.MakeOauth2Config()
|
||||
|
||||
// Generate oauth URL
|
||||
opts := opt.OAuth2Opts
|
||||
if !opt.NoOffline {
|
||||
opts = append(opts, oauth2.AccessTypeOffline)
|
||||
}
|
||||
authURL = oauthConfig.AuthCodeURL(state, opts...)
|
||||
authURL = oauth2Conf.AuthCodeURL(state, opts...)
|
||||
return authURL, state, nil
|
||||
}
|
||||
|
||||
// If TitleBarRedirect is set but we are doing a real oauth, then
|
||||
// override our redirect URL
|
||||
func fixRedirect(oauthConfig *oauth2.Config) *oauth2.Config {
|
||||
func fixRedirect(oauthConfig *Config) *Config {
|
||||
switch oauthConfig.RedirectURL {
|
||||
case TitleBarRedirectURL:
|
||||
// copy the config and set to use the internal webserver
|
||||
|
@ -692,12 +807,33 @@ func fixRedirect(oauthConfig *oauth2.Config) *oauth2.Config {
|
|||
return oauthConfig
|
||||
}
|
||||
|
||||
// clientCredentialsFlowGetToken does the initial creation of the token for the client credentials flow
|
||||
//
|
||||
// If opt is nil it will use the default Options.
|
||||
func clientCredentialsFlowGetToken(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, opt *Options) error {
|
||||
if opt == nil {
|
||||
opt = &Options{}
|
||||
}
|
||||
_ = opt // not currently using the Options
|
||||
fs.Debugf(nil, "Getting token for client credentials flow")
|
||||
_, tokenSource, err := NewClientCredentialsClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("client credentials flow: failed to make client: %w", err)
|
||||
}
|
||||
// Get the token and save it in the config file
|
||||
_, err = tokenSource.Token()
|
||||
if err != nil {
|
||||
return fmt.Errorf("client credentials flow: failed to get token: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// configSetup does the initial creation of the token
|
||||
//
|
||||
// If opt is nil it will use the default Options.
|
||||
//
|
||||
// It will run an internal webserver to receive the results
|
||||
func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt *Options) (string, error) {
|
||||
func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauthConfig *Config, opt *Options) (string, error) {
|
||||
if opt == nil {
|
||||
opt = &Options{}
|
||||
}
|
||||
|
@ -749,9 +885,13 @@ func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauth
|
|||
}
|
||||
|
||||
// Exchange the code for a token
|
||||
func configExchange(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config, code string) error {
|
||||
func configExchange(ctx context.Context, name string, m configmap.Mapper, oauthConfig *Config, code string) error {
|
||||
ctx = Context(ctx, fshttp.NewClient(ctx))
|
||||
token, err := oauthConfig.Exchange(ctx, code)
|
||||
|
||||
// Create the configuration required for the OAuth flow
|
||||
oauth2Conf := oauthConfig.MakeOauth2Config()
|
||||
|
||||
token, err := oauth2Conf.Exchange(ctx, code)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get token: %w", err)
|
||||
}
|
||||
|
@ -813,10 +953,17 @@ func (s *authServer) handleAuth(w http.ResponseWriter, req *http.Request) {
|
|||
// get code, error if empty
|
||||
code := req.Form.Get("code")
|
||||
if code == "" {
|
||||
reply(http.StatusBadRequest, &AuthResult{
|
||||
err := &AuthResult{
|
||||
Name: "Auth Error",
|
||||
Description: "No code returned by remote server",
|
||||
})
|
||||
}
|
||||
if errorCode := req.Form.Get("error"); errorCode != "" {
|
||||
err.Description += ": " + errorCode
|
||||
}
|
||||
if errorMessage := req.Form.Get("error_description"); errorMessage != "" {
|
||||
err.Description += ": " + errorMessage
|
||||
}
|
||||
reply(http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
vfs/dir.go (17 changes)
|
@ -459,7 +459,8 @@ func (d *Dir) addObject(node Node) {
|
|||
// This will be replaced with a real object when it is read back from the
|
||||
// remote.
|
||||
//
|
||||
// This is used to add directory entries while things are uploading
|
||||
// This is used by the vfs cache to insert objects that are uploading
|
||||
// into the directory tree.
|
||||
func (d *Dir) AddVirtual(leaf string, size int64, isDir bool) {
|
||||
var node Node
|
||||
d.mu.RLock()
|
||||
|
@ -475,7 +476,16 @@ func (d *Dir) AddVirtual(leaf string, size int64, isDir bool) {
|
|||
entry := fs.NewDir(remote, time.Now())
|
||||
node = newDir(d.vfs, d.f, d, entry)
|
||||
} else {
|
||||
isLink := false
|
||||
if d.vfs.Opt.Links {
|
||||
// since the path came from the cache it may have fs.LinkSuffix,
|
||||
// so remove it and mark the *File accordingly
|
||||
leaf, isLink = strings.CutSuffix(leaf, fs.LinkSuffix)
|
||||
}
|
||||
f := newFile(d, dPath, nil, leaf)
|
||||
if isLink {
|
||||
f.setSymlink()
|
||||
}
|
||||
f.setSize(size)
|
||||
node = f
|
||||
}
|
||||
|
@ -628,7 +638,7 @@ func (d *Dir) _purgeVirtual() {
|
|||
// if writing in progress then leave virtual
|
||||
continue
|
||||
}
|
||||
if d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal && d.vfs.cache.InUse(f.Path()) {
|
||||
if d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal && d.vfs.cache.InUse(f.CachePath()) {
|
||||
// if object in use or dirty then leave virtual
|
||||
continue
|
||||
}
|
||||
|
@ -718,6 +728,9 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
|
|||
if name == "." || name == ".." {
|
||||
continue
|
||||
}
|
||||
if d.vfs.Opt.Links {
|
||||
name, _ = strings.CutSuffix(name, fs.LinkSuffix)
|
||||
}
|
||||
node := d.items[name]
|
||||
if mv.add(d, name) {
|
||||
continue
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
// Error describes low level errors in a cross platform way.
|
||||
type Error byte
|
||||
|
||||
// NB if changing errors translateError in cmd/mount/fs.go, cmd/cmount/fs.go
|
||||
// NB if changing errors, update translateError in cmd/mount/fs.go, cmd/cmount/fs.go, cmd/mount2/fs.go
|
||||
|
||||
// Low level errors
|
||||
const (
|
||||
|
@ -20,6 +20,7 @@ const (
|
|||
EBADF
|
||||
EROFS
|
||||
ENOSYS
|
||||
ELOOP
|
||||
)
|
||||
|
||||
// Errors which have exact counterparts in os
|
||||
|
@ -38,6 +39,7 @@ var errorNames = []string{
|
|||
EBADF: "Bad file descriptor",
|
||||
EROFS: "Read only file system",
|
||||
ENOSYS: "Function not implemented",
|
||||
ELOOP: "Too many symbolic links",
|
||||
}
|
||||
|
||||
// Error renders the error as a string
|
||||
|
|
vfs/file.go (177 changes)
|
@ -4,8 +4,10 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -34,7 +36,7 @@ import (
|
|||
//
|
||||
// File may **not** read any members of Dir directly.
|
||||
|
||||
// File represents a file
|
||||
// File represents a file or a symlink
|
||||
type File struct {
|
||||
inode uint64 // inode number - read only
|
||||
size atomic.Int64 // size of file
|
||||
|
@ -53,6 +55,7 @@ type File struct {
|
|||
sys atomic.Value // user defined info to be attached here
|
||||
nwriters atomic.Int32 // len(writers)
|
||||
appendMode bool // file was opened with O_APPEND
|
||||
isLink bool // file represents a symlink
|
||||
}
|
||||
|
||||
// newFile creates a new File
|
||||
|
@ -69,9 +72,18 @@ func newFile(d *Dir, dPath string, o fs.Object, leaf string) *File {
|
|||
if o != nil {
|
||||
f.size.Store(o.Size())
|
||||
}
|
||||
f._setIsLink()
|
||||
return f
|
||||
}
|
||||
|
||||
// Set whether this is a link or not based on f.o
|
||||
func (f *File) _setIsLink() {
|
||||
if f.o == nil {
|
||||
return
|
||||
}
|
||||
f.isLink = f.d.vfs.Opt.Links && strings.HasSuffix(f.o.Remote(), fs.LinkSuffix)
|
||||
}
|
||||
|
||||
// String converts it to printable
|
||||
func (f *File) String() string {
|
||||
if f == nil {
|
||||
|
@ -90,13 +102,31 @@ func (f *File) IsDir() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// IsSymlink returns true for symlinks when --links is enabled
|
||||
func (f *File) IsSymlink() bool {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
return f.isLink
|
||||
}
|
||||
|
||||
// setSymlink marks this File as being a symlink
|
||||
func (f *File) setSymlink() {
|
||||
f.mu.Lock()
|
||||
f.isLink = true
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
// Mode bits of the file or directory - satisfies Node interface
|
||||
func (f *File) Mode() (mode os.FileMode) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
mode = os.FileMode(f.d.vfs.Opt.FilePerms)
|
||||
if f.appendMode {
|
||||
mode |= os.ModeAppend
|
||||
if f.isLink {
|
||||
mode = os.FileMode(f.d.vfs.Opt.LinkPerms)
|
||||
} else {
|
||||
mode = os.FileMode(f.d.vfs.Opt.FilePerms)
|
||||
if f.appendMode {
|
||||
mode |= os.ModeAppend
|
||||
}
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
@ -122,6 +152,34 @@ func (f *File) Path() string {
|
|||
return path.Join(dPath, leaf)
|
||||
}
|
||||
|
||||
// _fixCachePath returns fullPath with the fs.LinkSuffix added if appropriate
|
||||
// use when lock is held
|
||||
func (f *File) _fixCachePath(fullPath string) string {
|
||||
if !f.isLink {
|
||||
return fullPath
|
||||
}
|
||||
return fullPath + fs.LinkSuffix
|
||||
}
|
||||
|
||||
// _cachePath returns the full path of the file with the fs.LinkSuffix if appropriate
|
||||
// use when lock is held
|
||||
func (f *File) _cachePath() string {
|
||||
dPath, leaf := f.dPath, f.leaf
|
||||
if f.isLink {
|
||||
leaf += fs.LinkSuffix
|
||||
}
|
||||
return path.Join(dPath, leaf)
|
||||
}
|
||||
|
||||
// CachePath returns the full path of the file with the fs.LinkSuffix if appropriate
|
||||
//
|
||||
// We use this path when storing files in the cache.
|
||||
func (f *File) CachePath() string {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
return f._cachePath()
|
||||
}
|
||||
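As a worked example (assuming `--links` is enabled and `fs.LinkSuffix` is `.rclonelink`): a symlink presented in the VFS as `dir1/link` reports `Path() == "dir1/link"`, while `CachePath() == "dir1/link.rclonelink"`, which is the name the VFS cache and the remote actually store.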
|
||||
// Sys returns underlying data source (can be nil) - satisfies Node interface
|
||||
func (f *File) Sys() interface{} {
|
||||
return f.sys.Load()
|
||||
|
@ -172,6 +230,8 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
|
|||
f.mu.RLock()
|
||||
d := f.d
|
||||
oldPendingRenameFun := f.pendingRenameFun
|
||||
oldPath := f._cachePath()
|
||||
newCacheName := f._fixCachePath(newName)
|
||||
f.mu.RUnlock()
|
||||
|
||||
if features := d.Fs().Features(); features.Move == nil && features.Copy == nil {
|
||||
|
@ -180,9 +240,8 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
oldPath := f.Path()
|
||||
// File.mu is unlocked here to call Dir.Path()
|
||||
newPath := path.Join(destDir.Path(), newName)
|
||||
newPath := path.Join(destDir.Path(), newCacheName)
|
||||
|
||||
renameCall := func(ctx context.Context) (err error) {
|
||||
// chain rename calls if any
|
||||
|
@ -231,6 +290,7 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
|
|||
f.mu.Lock()
|
||||
if newObject != nil {
|
||||
f.o = newObject
|
||||
f._setIsLink()
|
||||
}
|
||||
f.pendingRenameFun = nil
|
||||
f.mu.Unlock()
|
||||
|
@ -334,7 +394,7 @@ func (f *File) ModTime() (modTime time.Time) {
|
|||
}
|
||||
// Read the modtime from a dirty item if it exists
|
||||
if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal {
|
||||
if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil {
|
||||
if item := f.d.vfs.cache.DirtyItem(f._cachePath()); item != nil {
|
||||
modTime, err := item.GetModTime()
|
||||
if err != nil {
|
||||
fs.Errorf(f._path(), "ModTime: Item GetModTime failed: %v", err)
|
||||
|
@ -371,7 +431,7 @@ func (f *File) Size() int64 {
|
|||
|
||||
// Read the size from a dirty item if it exists
|
||||
if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal {
|
||||
if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil {
|
||||
if item := f.d.vfs.cache.DirtyItem(f._cachePath()); item != nil {
|
||||
size, err := item.GetSize()
|
||||
if err != nil {
|
||||
fs.Errorf(f._path(), "Size: Item GetSize failed: %v", err)
|
||||
|
@ -404,8 +464,8 @@ func (f *File) SetModTime(modTime time.Time) error {
|
|||
f.pendingModTime = modTime
|
||||
|
||||
// set the time of the file in the cache
|
||||
if f.d.vfs.cache != nil && f.d.vfs.cache.Exists(f._path()) {
|
||||
f.d.vfs.cache.SetModTime(f._path(), f.pendingModTime)
|
||||
if f.d.vfs.cache != nil && f.d.vfs.cache.Exists(f._cachePath()) {
|
||||
f.d.vfs.cache.SetModTime(f._cachePath(), f.pendingModTime)
|
||||
}
|
||||
|
||||
// Only update the ModTime when there are no writers, setObject will do it
|
||||
|
@ -480,6 +540,7 @@ func (f *File) setSize(n int64) {
|
|||
func (f *File) setObject(o fs.Object) {
|
||||
f.mu.Lock()
|
||||
f.o = o
|
||||
f._setIsLink()
|
||||
_ = f._applyPendingModTime()
|
||||
d := f.d
|
||||
f.mu.Unlock()
|
||||
|
@ -493,6 +554,7 @@ func (f *File) setObject(o fs.Object) {
|
|||
func (f *File) setObjectNoUpdate(o fs.Object) {
|
||||
f.mu.Lock()
|
||||
f.o = o
|
||||
f._setIsLink()
|
||||
f.virtualModTime = nil
|
||||
fs.Debugf(f._path(), "Reset virtual modtime")
|
||||
f.mu.Unlock()
|
||||
|
@ -611,8 +673,8 @@ func (f *File) Remove() (err error) {
|
|||
|
||||
// Remove the object from the cache
|
||||
wasWriting := false
|
||||
if d.vfs.cache != nil && d.vfs.cache.Exists(f.Path()) {
|
||||
wasWriting = d.vfs.cache.Remove(f.Path())
|
||||
if d.vfs.cache != nil && d.vfs.cache.Exists(f.CachePath()) {
|
||||
wasWriting = d.vfs.cache.Remove(f.CachePath())
|
||||
}
|
||||
|
||||
f.muRW.Lock() // muRW must be locked before mu to avoid
|
||||
|
@ -673,6 +735,85 @@ func (f *File) Fs() fs.Fs {
|
|||
return f.d.Fs()
|
||||
}
|
||||
|
||||
// MaxSymlinkIterations is the largest number of symlink evaluations resolveNode will do.
|
||||
const MaxSymlinkIterations = 32
|
||||
|
||||
// If f is a symlink then it resolves it to a new Node.
|
||||
//
|
||||
// This is a simplistic symlink resolver - it only resolves direct
|
||||
// symlinks, it will **not** resolve paths that point into a directory
|
||||
// via a symlink.
|
||||
//
|
||||
// It returns the target node after the evaluation of all symbolic
|
||||
// links.
|
||||
//
|
||||
// It returns an error if too many symlinks need to be resolved
|
||||
// (ELOOP) or there is a loop.
|
||||
func (f *File) resolveNode() (target Node, err error) {
|
||||
defer log.Trace(f.Path(), "")("target=%v, err=%v", &target, &err)
|
||||
seen := make(map[string]struct{})
|
||||
for tries := 0; tries < MaxSymlinkIterations; tries++ {
|
||||
// If f isn't a symlink, we've arrived at the target
|
||||
if !f.IsSymlink() {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Read the symlink
|
||||
fd, err := f.Open(os.O_RDONLY | o_SYMLINK)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := io.ReadAll(fd)
|
||||
closeErr := fd.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if closeErr != nil {
|
||||
return nil, closeErr
|
||||
}
|
||||
targetPath := string(b)
|
||||
|
||||
// Convert to a path relative to the root
|
||||
// Symlinks are relative to their file node
|
||||
if !path.IsAbs(targetPath) {
|
||||
basePath := path.Dir(f.Path())
|
||||
targetPath = path.Join(basePath, targetPath)
|
||||
}
|
||||
|
||||
// Clean the path, rclone style
|
||||
targetPath = path.Clean(targetPath)
|
||||
if targetPath == "." {
|
||||
targetPath = ""
|
||||
}
|
||||
|
||||
// Check if we've already seen this path
|
||||
if _, ok := seen[targetPath]; ok {
|
||||
return nil, ELOOP
|
||||
}
|
||||
seen[targetPath] = struct{}{}
|
||||
|
||||
// Resolve the targetPath into a node
|
||||
target, err = f.d.vfs.Stat(targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return node as it must be the destination if not a file
|
||||
var ok bool
|
||||
f, ok = target.(*File)
|
||||
if !ok {
|
||||
return target, nil
|
||||
}
|
||||
}
|
||||
return nil, ELOOP
|
||||
}
|
||||
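For example, a chain `link-a -> link-b -> file.txt` resolves to the File node for `file.txt` after two iterations, while `link-a -> link-b -> link-a` is caught by the `seen` map and returns ELOOP; a chain longer than MaxSymlinkIterations hops also returns ELOOP.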
|
||||
// Open also implements the internal flag o_SYMLINK which instead
|
||||
// of opening the file a symlink points to, opens the symlink itself.
|
||||
// This is used for reading and writing the symlink and shouldn't be
|
||||
// used externally.
|
||||
const o_SYMLINK = 0x4000_0000 //nolint:revive
|
||||
|
||||
// Open a file according to the flags provided
|
||||
//
|
||||
// O_RDONLY open the file read-only.
|
||||
|
@ -694,6 +835,16 @@ func (f *File) Open(flags int) (fd Handle, err error) {
|
|||
rdwrMode = flags & accessModeMask
|
||||
)
|
||||
|
||||
// If this is a symlink, then resolve it
|
||||
if f.IsSymlink() && flags&o_SYMLINK == 0 {
|
||||
target, err := f.resolveNode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return target.Open(flags)
|
||||
}
|
||||
flags &^= o_SYMLINK
|
||||
|
||||
// http://pubs.opengroup.org/onlinepubs/7908799/xsh/open.html
|
||||
// The result of using O_TRUNC with O_RDONLY is undefined.
|
||||
// Linux seems to truncate the file, but we prefer to return EINVAL
|
||||
|
@ -738,7 +889,7 @@ func (f *File) Open(flags int) (fd Handle, err error) {
|
|||
d := f.d
|
||||
f.mu.RUnlock()
|
||||
CacheMode := d.vfs.Opt.CacheMode
|
||||
if CacheMode >= vfscommon.CacheModeMinimal && (d.vfs.cache.InUse(f.Path()) || d.vfs.cache.Exists(f.Path())) {
|
||||
if CacheMode >= vfscommon.CacheModeMinimal && (d.vfs.cache.InUse(f.CachePath()) || d.vfs.cache.Exists(f.CachePath())) {
|
||||
fd, err = f.openRW(flags)
|
||||
} else if read && write {
|
||||
if CacheMode >= vfscommon.CacheModeMinimal {
|
||||
|
|
|
@ -43,7 +43,7 @@ func (fh *RWFileHandle) Unlock() error {
|
|||
func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
|
||||
defer log.Trace(f.Path(), "")("err=%v", &err)
|
||||
// get an item to represent this from the cache
|
||||
item := d.vfs.cache.Item(f.Path())
|
||||
item := d.vfs.cache.Item(f.CachePath())
|
||||
|
||||
exists := f.exists() || (item.Exists() && !item.WrittenBack())
|
||||
|
||||
|
|
vfs/vfs.go (109 changes)
|
@ -242,6 +242,11 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
|
|||
fs.Logf(f, "--vfs-cache-mode writes or full is recommended for this remote as it can't stream")
|
||||
}
|
||||
|
||||
// Warn if we handle symlinks
|
||||
if vfs.Opt.Links {
|
||||
fs.Logf(f, "Symlinks support enabled")
|
||||
}
|
||||
|
||||
// Pin the Fs into the cache so that when we use cache.NewFs
|
||||
// with the same remote string we get this one. The Pin is
|
||||
// removed when the vfs is finalized
|
||||
|
@ -626,6 +631,10 @@ func (vfs *VFS) Statfs() (total, used, free int64) {
|
|||
return nil
|
||||
})
|
||||
vfs.usage.Used = &usedBySizeAlgorithm
|
||||
// if we read a Total size then we should calculate Free from it
|
||||
if vfs.usage.Total != nil {
|
||||
vfs.usage.Free = nil
|
||||
}
|
||||
}
|
||||
vfs.usageTime = time.Now()
|
||||
if err != nil {
|
||||
|
@ -766,7 +775,25 @@ func (vfs *VFS) ReadFile(filename string) (b []byte, err error) {
|
|||
return io.ReadAll(f)
|
||||
}
|
||||
|
||||
// WriteFile writes data to the named file, creating it if necessary. If the
|
||||
// file does not exist, WriteFile creates it with permissions perm (before
|
||||
// umask); otherwise WriteFile truncates it before writing, without changing
|
||||
// permissions. Since WriteFile requires multiple system calls to complete,
|
||||
// a failure mid-operation can leave the file in a partially written state.
|
||||
func (vfs *VFS) WriteFile(name string, data []byte, perm os.FileMode) (err error) {
|
||||
fh, err := vfs.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fs.CheckClose(fh, &err)
|
||||
_, err = fh.Write(data)
|
||||
return err
|
||||
}
|
||||
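A minimal usage sketch, assuming an existing *VFS named v, a parent directory that already exists, and a caller that returns error; the path and mode are illustrative.

```go
// Create or truncate hello.txt at the root of the VFS and write to it.
if err := v.WriteFile("hello.txt", []byte("hello\n"), 0644); err != nil {
	return err
}
```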
|
||||
// AddVirtual adds the object (file or dir) to the directory cache
|
||||
//
|
||||
// This is used by the vfs cache to insert objects that are uploading
|
||||
// into the directory tree.
|
||||
func (vfs *VFS) AddVirtual(remote string, size int64, isDir bool) (err error) {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
var dir *Dir
|
||||
|
@ -784,3 +811,85 @@ func (vfs *VFS) AddVirtual(remote string, size int64, isDir bool) (err error) {
|
|||
dir.AddVirtual(leaf, size, false)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func (vfs *VFS) Readlink(name string) (s string, err error) {
|
||||
if !vfs.Opt.Links {
|
||||
fs.Errorf(nil, "symlinks not supported without the --links flag: %v", name)
|
||||
return "", ENOSYS
|
||||
}
|
||||
node, err := vfs.Stat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
file, ok := node.(*File)
|
||||
if !ok || !file.IsSymlink() {
|
||||
return "", EINVAL // not a symlink
|
||||
}
|
||||
fd, err := file.Open(os.O_RDONLY | o_SYMLINK)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer fs.CheckClose(fd, &err)
|
||||
b, err := io.ReadAll(fd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// CreateSymlink creates newname as a symbolic link to oldname.
|
||||
// On Windows, a symlink to a non-existent oldname creates a file symlink;
|
||||
// if oldname is later created as a directory the symlink will not work.
|
||||
// It returns the node created
|
||||
func (vfs *VFS) CreateSymlink(oldname, newname string) (Node, error) {
|
||||
if !vfs.Opt.Links {
|
||||
fs.Errorf(newname, "symlinks not supported without the --links flag")
|
||||
return nil, ENOSYS
|
||||
}
|
||||
|
||||
// Destination can't exist
|
||||
_, err := vfs.Stat(newname)
|
||||
if err == nil {
|
||||
return nil, EEXIST
|
||||
} else if err != ENOENT {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find the parent
|
||||
dir, leaf, err := vfs.StatParent(newname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the file node
|
||||
flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC | o_SYMLINK
|
||||
file, err := dir.Create(leaf, flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Force the file to be a link
|
||||
file.setSymlink()
|
||||
|
||||
// Open the file
|
||||
fh, err := file.Open(flags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fs.CheckClose(fh, &err)
|
||||
|
||||
// Write the symlink data
|
||||
_, err = fh.Write([]byte(strings.ReplaceAll(oldname, "\\", "/")))
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Symlink creates newname as a symbolic link to oldname.
|
||||
// On Windows, a symlink to a non-existent oldname creates a file symlink;
|
||||
// if oldname is later created as a directory the symlink will not work.
|
||||
func (vfs *VFS) Symlink(oldname, newname string) error {
|
||||
_, err := vfs.CreateSymlink(oldname, newname)
|
||||
return err
|
||||
}
|
||||
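Together with Readlink this gives a symmetric API. A hedged sketch, assuming a *VFS named v created with the Links option enabled, an existing file dir1/file1, and a caller that returns error:

```go
// Create a symlink and read its target back. Both calls return ENOSYS
// unless --links/--vfs-links is enabled.
if err := v.Symlink("dir1/file1", "link-to-file1"); err != nil {
	return err
}
target, err := v.Readlink("link-to-file1")
if err != nil {
	return err
}
fmt.Println(target) // prints "dir1/file1"
```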
|
|
vfs/vfs.md (44 changes)
|
@ -303,6 +303,50 @@ modified files from the cache (the related global flag `--checkers` has no effec
|
|||
|
||||
--transfers int Number of file transfers to run in parallel (default 4)
|
||||
|
||||
### Symlinks

By default the VFS does not support symlinks. However this may be
enabled with either of the following flags:

    --links      Translate symlinks to/from regular files with a '.rclonelink' extension.
    --vfs-links  Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS

As most cloud storage systems do not support symlinks directly, rclone
stores the symlink as a normal file with a special extension. So a
file which appears as a symlink `link-to-file.txt` would be stored on
cloud storage as `link-to-file.txt.rclonelink` and the contents would
be the path to the symlink destination.

Note that `--links` enables symlink translation globally in rclone -
this includes any backend which supports the concept (for example the
local backend). `--vfs-links` just enables it for the VFS layer.

This scheme is compatible with that used by the [local backend with the --local-links flag](/local/#symlinks-junction-points).

The `--vfs-links` flag has been designed for `rclone mount`, `rclone
nfsmount` and `rclone serve nfs`.

It hasn't been tested with the other `rclone serve` commands yet.

A limitation of the current implementation is that it expects the
caller to resolve sub-symlinks. For example given this directory tree

```
.
├── dir
│   └── file.txt
└── linked-dir -> dir
```

The VFS will correctly resolve `linked-dir` but not
`linked-dir/file.txt`. This is not a problem for the tested commands
but may be for other commands.

**Note** that there is an outstanding issue with symlink support
[issue #8245](https://github.com/rclone/rclone/issues/8245) with duplicate
files being created when symlinks are moved into directories where
there is a file of the same name (or vice versa).
||||
### VFS Case Sensitivity
|
||||
|
||||
Linux file systems are case-sensitive: two files can differ only

@@ -1,6 +1,7 @@
package vfscommon

import (
    "context"
    "os"
    "runtime"
    "time"

@@ -44,6 +45,11 @@ var OptionsInfo = fs.Options{{
    Default: false,
    Help:    "Only allow read-only access",
    Groups:  "VFS",
}, {
    Name:    "vfs_links",
    Default: false,
    Help:    "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the VFS",
    Groups:  "VFS",
}, {
    Name:    "vfs_cache_mode",
    Default: CacheModeOff,

@@ -94,6 +100,11 @@ var OptionsInfo = fs.Options{{
    Default: FileMode(0666),
    Help:    "File permissions",
    Groups:  "VFS",
}, {
    Name:    "link_perms",
    Default: FileMode(0666),
    Help:    "Link permissions",
    Groups:  "VFS",
}, {
    Name:    "vfs_case_insensitive",
    Default: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise,

@@ -165,6 +176,7 @@ type Options struct {
    NoSeek       bool        `config:"no_seek"`        // don't allow seeking if set
    NoChecksum   bool        `config:"no_checksum"`    // don't check checksums if set
    ReadOnly     bool        `config:"read_only"`      // if set VFS is read only
    Links        bool        `config:"vfs_links"`      // if set interpret link files
    NoModTime    bool        `config:"no_modtime"`     // don't read mod times for files
    DirCacheTime fs.Duration `config:"dir_cache_time"` // how long to consider directory listing cache valid
    Refresh      bool        `config:"vfs_refresh"`    // refreshes the directory listing recursively on start

@@ -174,6 +186,7 @@ type Options struct {
    GID            uint32        `config:"gid"`
    DirPerms       FileMode      `config:"dir_perms"`
    FilePerms      FileMode      `config:"file_perms"`
    LinkPerms      FileMode      `config:"link_perms"`
    ChunkSize      fs.SizeSuffix `config:"vfs_read_chunk_size"`       // if > 0 read files in chunks
    ChunkSizeLimit fs.SizeSuffix `config:"vfs_read_chunk_size_limit"` // if > ChunkSize double the chunk size after each chunk until reached
    ChunkStreams   int           `config:"vfs_read_chunk_streams"`    // Number of download streams to use

@@ -198,10 +211,21 @@ var Opt Options

// Init the options, making sure everything is within range
func (opt *Options) Init() {
    ci := fs.GetConfig(context.Background())

    // Override --vfs-links with --links if set
    if ci.Links {
        opt.Links = true
    }

    // Mask the permissions with the umask
    opt.DirPerms &= ^opt.Umask
    opt.FilePerms &= ^opt.Umask
    opt.LinkPerms &= ^opt.Umask

    // Make sure directories are returned as directories
    opt.DirPerms |= FileMode(os.ModeDir)

    // Make sure links are returned as links
    opt.LinkPerms |= FileMode(os.ModeSymlink)
}
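
As a side note on what `Init` does with the permission bits, here is a standalone sketch (not part of the patch; the variable names are made up for the example). With the default umask of 022 a configured file mode of 0666 ends up as 0644, and the directory and link modes get the `ModeDir` and `ModeSymlink` type bits OR-ed back in so they stat as the right kind of entry.

```go
package main

import (
    "fmt"
    "os"
)

func main() {
    umask := os.FileMode(0022)

    filePerms := os.FileMode(0666)
    dirPerms := os.FileMode(0777)
    linkPerms := os.FileMode(0666)

    // Mask the permissions with the umask, as Init does above
    filePerms &= ^umask // 0666 &^ 0022 = 0644
    dirPerms &= ^umask  // 0777 &^ 0022 = 0755
    linkPerms &= ^umask // 0666 &^ 0022 = 0644

    // Add the file type bits back so the modes describe a directory and a symlink
    dirPerms |= os.ModeDir
    linkPerms |= os.ModeSymlink

    fmt.Println(filePerms) // -rw-r--r--
    fmt.Println(dirPerms)  // drwxr-xr-x
    fmt.Println(linkPerms) // Lrw-r--r--
}
```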

@@ -6,6 +6,7 @@ import (
    "testing"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/vfs"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -72,3 +73,230 @@ func TestFileModTimeWithOpenWriters(t *testing.T) {

    run.rm(t, "cp-archive-test")
}

// TestSymlinks tests all the API of the VFS / Mount symlinks support
func TestSymlinks(t *testing.T) {
    run.skipIfNoFUSE(t)
    if !run.vfsOpt.Links {
        t.Skip("No symlinks configured")
    }

    if runtime.GOOS == "windows" {
        t.Skip("Skipping test on Windows")
    }

    fs.Logf(nil, "Links: %v, useVFS: %v, suffix: %v", run.vfsOpt.Links, run.useVFS, fs.LinkSuffix)

    // Create initial setup of test files and directories we will create links to
    run.mkdir(t, "dir1")
    run.mkdir(t, "dir1/sub1dir1")
    run.createFile(t, "dir1/file1", "potato")
    run.mkdir(t, "dir2")
    run.mkdir(t, "dir2/sub1dir2")
    run.createFile(t, "dir2/file1", "chicken")

    // base state all the tests will be built off
    baseState := "dir1/|dir1/sub1dir1/|dir1/file1 6|dir2/|dir2/sub1dir2/|dir2/file1 7"
    // Check the tests return to the base state
    checkBaseState := func() {
        run.checkDir(t, baseState)
    }
    checkBaseState()

    t.Run("FileLink", func(t *testing.T) {
        // Link to a file
        run.symlink(t, "dir1/file1", "dir1file1_link")
        run.checkDir(t, baseState+"|dir1file1_link 10")
        run.checkMode(t, "dir1file1_link", os.FileMode(run.vfsOpt.LinkPerms), os.FileMode(run.vfsOpt.FilePerms))
        assert.Equal(t, "dir1/file1", run.readlink(t, "dir1file1_link"))

        // Read through a symlink
        assert.Equal(t, "potato", run.readFile(t, "dir1file1_link"))

        // Write through a symlink
        err := writeFile(run.path("dir1file1_link"), []byte("carrot"), 0600)
        require.NoError(t, err)
        assert.Equal(t, "carrot", run.readFile(t, "dir1file1_link"))
        assert.Equal(t, "carrot", run.readFile(t, "dir1/file1"))

        // Rename a symlink
        err = run.os.Rename(run.path("dir1file1_link"), run.path("dir1file1_link")+"_bla")
        require.NoError(t, err)
        run.checkDir(t, baseState+"|dir1file1_link_bla 10")
        assert.Equal(t, "dir1/file1", run.readlink(t, "dir1file1_link_bla"))

        // Delete a symlink
        run.rm(t, "dir1file1_link_bla")
        checkBaseState()
    })

    t.Run("DirLink", func(t *testing.T) {
        // Link to a dir
        run.symlink(t, "dir1", "dir1_link")
        run.checkDir(t, baseState+"|dir1_link 4")
        run.checkMode(t, "dir1_link", os.FileMode(run.vfsOpt.LinkPerms), os.FileMode(run.vfsOpt.DirPerms))
        assert.Equal(t, "dir1", run.readlink(t, "dir1_link"))

        // Check you can't open a directory symlink
        _, err := run.os.OpenFile(run.path("dir1_link"), os.O_WRONLY, 0600)
        require.Error(t, err)

        // Our symlink resolution is very simple when using the VFS directly; when
        // using the mount the OS will resolve the symlinks, so we don't recurse here

        // Read entries directly
        dir1Entries := make(dirMap)
        run.readLocalEx(t, dir1Entries, "dir1", false)
        assert.Equal(t, newDirMap("dir1/sub1dir1/|dir1/file1 6"), dir1Entries)

        // Read entries through the directory symlink
        dir1EntriesSymlink := make(dirMap)
        run.readLocalEx(t, dir1EntriesSymlink, "dir1_link", false)
        assert.Equal(t, newDirMap("dir1_link/sub1dir1/|dir1_link/file1 6"), dir1EntriesSymlink)

        // Rename directory symlink
        err = run.os.Rename(run.path("dir1_link"), run.path("dir1_link")+"_bla")
        require.NoError(t, err)
        run.checkDir(t, baseState+"|dir1_link_bla 4")
        assert.Equal(t, "dir1", run.readlink(t, "dir1_link_bla"))

        // Remove directory symlink
        run.rm(t, "dir1_link_bla")

        checkBaseState()
    })

    // Corner case #1 - We do not allow creating regular and symlink files having the same name (ie, test.txt and test.txt.rclonelink)

    // Symlink first, then regular
    t.Run("OverwriteSymlinkWithRegular", func(t *testing.T) {
        link1Name := "link1.txt"

        run.symlink(t, "dir1/file1", link1Name)
        run.checkDir(t, baseState+"|link1.txt 10")

        fh, err := run.os.OpenFile(run.path(link1Name), os.O_WRONLY|os.O_CREATE, os.FileMode(run.vfsOpt.FilePerms))

        // On a real mount with links enabled this opens the symlink target as expected, otherwise it fails to create a new file
        assert.NoError(t, err)
        // Don't care about the result; in some cache modes the file can't be opened for writing, so closing it would trigger an error
        _ = fh.Close()

        run.rm(t, link1Name)
        checkBaseState()
    })

    // Regular first, then symlink
    t.Run("OverwriteRegularWithSymlink", func(t *testing.T) {
        link1Name := "link1.txt"

        run.createFile(t, link1Name, "")
        run.checkDir(t, baseState+"|link1.txt 0")

        err := run.os.Symlink(".", run.path(link1Name))
        assert.Error(t, err)

        run.rm(t, link1Name)
        checkBaseState()
    })

    // Corner case #2 - We do not allow creating a directory and a symlink file having the same name (ie, test and test.rclonelink)

    // Symlink first, then directory
    t.Run("OverwriteSymlinkWithDirectory", func(t *testing.T) {
        link1Name := "link1"

        run.symlink(t, ".", link1Name)
        run.checkDir(t, baseState+"|link1 1")

        err := run.os.Mkdir(run.path(link1Name), os.FileMode(run.vfsOpt.DirPerms))
        assert.Error(t, err)

        run.rm(t, link1Name)
        checkBaseState()
    })

    // Directory first, then symlink
    t.Run("OverwriteDirectoryWithSymlink", func(t *testing.T) {
        link1Name := "link1"

        run.mkdir(t, link1Name)
        run.checkDir(t, baseState+"|link1/")

        err := run.os.Symlink(".", run.path(link1Name))
        assert.Error(t, err)

        run.rm(t, link1Name)
        checkBaseState()
    })

    // Corner case #3 - We do not allow moving a directory or file where the target already has the same name (ie, test and test.rclonelink)

    // Move symlink -> regular file
    t.Run("MoveSymlinkToFile", func(t *testing.T) {
        t.Skip("FIXME not implemented")
        link1Name := "link1.txt"

        run.symlink(t, ".", link1Name)
        run.createFile(t, "dir1/link1.txt", "")
        run.checkDir(t, baseState+"|link1.txt 1|dir1/link1.txt 0")

        err := run.os.Rename(run.path(link1Name), run.path("dir1/"+link1Name))
        assert.Error(t, err)

        run.rm(t, link1Name)
        run.rm(t, "dir1/link1.txt")
        checkBaseState()
    })

    // Move regular file -> symlink
    t.Run("MoveFileToSymlink", func(t *testing.T) {
        t.Skip("FIXME not implemented")
        link1Name := "link1.txt"

        run.createFile(t, link1Name, "")
        run.symlink(t, ".", "dir1/"+link1Name)
        run.checkDir(t, baseState+"|link1.txt 0|dir1/link1.txt 1")

        err := run.os.Rename(run.path(link1Name), run.path("dir1/link1.txt"))
        assert.Error(t, err)

        run.rm(t, link1Name)
        run.rm(t, "dir1/"+link1Name)
        checkBaseState()
    })

    // Move symlink -> directory
    t.Run("MoveSymlinkToDirectory", func(t *testing.T) {
        t.Skip("FIXME not implemented")
        link1Name := "link1"

        run.symlink(t, ".", link1Name)
        run.mkdir(t, "dir1/link1")
        run.checkDir(t, baseState+"|link1 1|dir1/link1/")

        err := run.os.Rename(run.path(link1Name), run.path("dir1/"+link1Name))
        assert.Error(t, err)

        run.rm(t, link1Name)
        run.rm(t, "dir1/link1")
        checkBaseState()
    })

    // Move directory -> symlink
    t.Run("MoveDirectoryToSymlink", func(t *testing.T) {
        t.Skip("FIXME not implemented")
        link1Name := "dir1/link1"

        run.mkdir(t, "link1")
        run.symlink(t, ".", link1Name)
        run.checkDir(t, baseState+"|link1/|dir1/link1 1")

        err := run.os.Rename(run.path("link1"), run.path("dir1/link1"))
        assert.Error(t, err)

        run.rm(t, "link1")
        run.rm(t, link1Name)
        checkBaseState()
    })
}

@@ -49,12 +49,15 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach
    tests := []struct {
        cacheMode vfscommon.CacheMode
        writeBack fs.Duration
        links     bool
    }{
        {cacheMode: vfscommon.CacheModeOff},
        {cacheMode: vfscommon.CacheModeOff, links: true},
        {cacheMode: vfscommon.CacheModeMinimal},
        {cacheMode: vfscommon.CacheModeWrites},
        {cacheMode: vfscommon.CacheModeFull},
        {cacheMode: vfscommon.CacheModeFull, writeBack: fs.Duration(100 * time.Millisecond)},
        {cacheMode: vfscommon.CacheModeFull, writeBack: fs.Duration(100 * time.Millisecond), links: true},
    }
    for _, test := range tests {
        if test.cacheMode < minimumRequiredCacheMode {

@@ -63,11 +66,15 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach
        vfsOpt := vfscommon.Opt
        vfsOpt.CacheMode = test.cacheMode
        vfsOpt.WriteBack = test.writeBack
        vfsOpt.Links = test.links
        run = newRun(useVFS, &vfsOpt, mountFn)
        what := fmt.Sprintf("CacheMode=%v", test.cacheMode)
        if test.writeBack > 0 {
            what += fmt.Sprintf(",WriteBack=%v", test.writeBack)
        }
        if test.links {
            what += fmt.Sprintf(",Links=%v", test.links)
        }
        fs.Logf(nil, "Starting test run with %s", what)
        ok := t.Run(what, func(t *testing.T) {
            t.Run("TestTouchAndDelete", TestTouchAndDelete)

@@ -98,6 +105,7 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach
            t.Run("TestWriteFileFsync", TestWriteFileFsync)
            t.Run("TestWriteFileDup", TestWriteFileDup)
            t.Run("TestWriteFileAppend", TestWriteFileAppend)
            t.Run("TestSymlinks", TestSymlinks)
        })
        fs.Logf(nil, "Finished test run with %s (ok=%v)", what, ok)
        run.Finalise()

@@ -213,10 +221,16 @@ func newDirMap(dirString string) (dm dirMap) {
}

// Returns a dirmap with only the files in it
func (dm dirMap) filesOnly() dirMap {
func (dm dirMap) filesOnly(stripLinksSuffix bool) dirMap {
    newDm := make(dirMap)
    for name := range dm {
        if !strings.HasSuffix(name, "/") {
            if stripLinksSuffix {
                index := strings.LastIndex(name, " ")
                if index != -1 {
                    name = strings.TrimSuffix(name[0:index], fs.LinkSuffix) + name[index:]
                }
            }
            newDm[name] = struct{}{}
        }
    }
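
To see what the `stripLinksSuffix` branch does to an entry, here is a standalone sketch (the `stripLink` helper and the hard-coded `.rclonelink` constant are stand-ins for the test code and for `fs.LinkSuffix`, which is assumed to be that default). With the suffix stripped, the expected listing and the remote listing compare equal regardless of which side carries the suffix.

```go
package main

import (
    "fmt"
    "strings"
)

// linkSuffix stands in for fs.LinkSuffix (assumed to be ".rclonelink").
const linkSuffix = ".rclonelink"

// stripLink removes the link suffix from the name part of a "name size"
// entry, mirroring the transformation in filesOnly above.
func stripLink(entry string) string {
    index := strings.LastIndex(entry, " ")
    if index == -1 {
        return entry
    }
    return strings.TrimSuffix(entry[:index], linkSuffix) + entry[index:]
}

func main() {
    fmt.Println(stripLink("link1.txt.rclonelink 10")) // link1.txt 10
    fmt.Println(stripLink("dir1/file1 6"))            // dir1/file1 6 (unchanged)
}
```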

@@ -224,7 +238,9 @@
}

// reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
//
// If recurse is set it will recurse into subdirectories
func (r *Run) readLocalEx(t *testing.T, dir dirMap, filePath string, recurse bool) {
    realPath := r.path(filePath)
    files, err := r.os.ReadDir(realPath)
    require.NoError(t, err)

@@ -232,15 +248,26 @@
        name := path.Join(filePath, fi.Name())
        if fi.IsDir() {
            dir[name+"/"] = struct{}{}
            r.readLocal(t, dir, name)
            if recurse {
                r.readLocalEx(t, dir, name, recurse)
            }
            assert.Equal(t, os.FileMode(r.vfsOpt.DirPerms)&os.ModePerm, fi.Mode().Perm())
        } else {
            dir[fmt.Sprintf("%s %d", name, fi.Size())] = struct{}{}
            assert.Equal(t, os.FileMode(r.vfsOpt.FilePerms)&os.ModePerm, fi.Mode().Perm())
            if fi.Mode()&os.ModeSymlink != 0 {
                assert.Equal(t, os.FileMode(r.vfsOpt.LinkPerms)&os.ModePerm, fi.Mode().Perm())
            } else {
                assert.Equal(t, os.FileMode(r.vfsOpt.FilePerms)&os.ModePerm, fi.Mode().Perm())
            }
        }
    }
}

// reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
    r.readLocalEx(t, dir, filePath, true)
}

// reads the remote tree into dir
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
    objs, dirs, err := walk.GetAll(context.Background(), r.fremote, filepath, true, 1)

@@ -271,7 +298,7 @@ func (r *Run) checkDir(t *testing.T, dirString string) {
        remoteDm = make(dirMap)
        r.readRemote(t, remoteDm, "")
        // Ignore directories for remote compare
        remoteOK = reflect.DeepEqual(dm.filesOnly(), remoteDm.filesOnly())
        remoteOK = reflect.DeepEqual(dm.filesOnly(run.vfsOpt.Links), remoteDm.filesOnly(run.vfsOpt.Links))
        fuseOK = reflect.DeepEqual(dm, localDm)
        if remoteOK && fuseOK {
            return

@@ -280,7 +307,7 @@
        t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
        time.Sleep(sleep)
    }
    assert.Equal(t, dm.filesOnly(), remoteDm.filesOnly(), "expected vs remote")
    assert.Equal(t, dm.filesOnly(run.vfsOpt.Links), remoteDm.filesOnly(run.vfsOpt.Links), "expected vs remote")
    assert.Equal(t, dm, localDm, "expected vs fuse mount")
}

@@ -353,6 +380,37 @@ func (r *Run) rmdir(t *testing.T, filepath string) {
    require.NoError(t, err)
}

func (r *Run) symlink(t *testing.T, oldname, newname string) {
    newname = r.path(newname)
    err := r.os.Symlink(oldname, newname)
    require.NoError(t, err)
}

func (r *Run) checkMode(t *testing.T, name string, lexpected os.FileMode, expected os.FileMode) {
    if r.useVFS {
        info, err := run.os.Stat(run.path(name))
        require.NoError(t, err)
        assert.Equal(t, lexpected, info.Mode())
        assert.Equal(t, name, info.Name())
    } else {
        info, err := os.Lstat(run.path(name))
        require.NoError(t, err)
        assert.Equal(t, lexpected, info.Mode())
        assert.Equal(t, name, info.Name())

        info, err = run.os.Stat(run.path(name))
        require.NoError(t, err)
        assert.Equal(t, expected, info.Mode())
        assert.Equal(t, name, info.Name())
    }
}

func (r *Run) readlink(t *testing.T, name string) string {
    result, err := r.os.Readlink(r.path(name))
    require.NoError(t, err)
    return result
}
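
The two branches in `checkMode` mirror how the standard library reports symlinks on a real mount: `os.Lstat` describes the link itself while `os.Stat` follows it to the target. A small standalone illustration (the temporary paths are made up for the example):

```go
package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

func main() {
    dir, err := os.MkdirTemp("", "linkdemo")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)

    target := filepath.Join(dir, "file.txt")
    link := filepath.Join(dir, "file_link")
    if err := os.WriteFile(target, []byte("potato"), 0644); err != nil {
        log.Fatal(err)
    }
    if err := os.Symlink(target, link); err != nil {
        log.Fatal(err)
    }

    li, _ := os.Lstat(link) // the link itself: mode has os.ModeSymlink set
    si, _ := os.Stat(link)  // the target: a regular 6 byte file

    fmt.Println(li.Mode()&os.ModeSymlink != 0, si.Mode().IsRegular(), si.Size())
    // On a Unix-like system this prints: true true 6
}
```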

// TestMount checks that the Fs is mounted by seeing if the mountpoint
// is in the mount output
func TestMount(t *testing.T) {

@@ -22,6 +22,8 @@ type Oser interface {
    Remove(name string) error
    Rename(oldName, newName string) error
    Stat(path string) (os.FileInfo, error)
    Symlink(oldname, newname string) error
    Readlink(name string) (s string, err error)
}

// realOs is an implementation of Oser backed by the "os" package

@@ -130,6 +132,16 @@ func (r realOs) Stat(path string) (os.FileInfo, error) {
    return os.Stat(path)
}

// Symlink
func (r realOs) Symlink(oldname, newname string) error {
    return os.Symlink(oldname, newname)
}

// Readlink
func (r realOs) Readlink(name string) (s string, err error) {
    return os.Readlink(name)
}

// Check interfaces
var _ Oser = &realOs{}
var _ vfs.Handle = &realOsFile{}

@@ -37,6 +37,9 @@ var (
)

func newWriteFileHandle(d *Dir, f *File, remote string, flags int) (*WriteFileHandle, error) {
    if f.IsSymlink() {
        remote += fs.LinkSuffix
    }
    fh := &WriteFileHandle{
        remote: remote,
        flags:  flags,