Break the fs package up into smaller parts.

The purpose of this is to make the code easier to maintain and
eventually to allow the rclone backends to be re-used in other projects
without having to use the rclone configuration system.

The new code layout is documented in CONTRIBUTING.
Nick Craig-Wood 2018-01-12 16:30:54 +00:00
parent 92624bbbf1
commit 11da2a6c9b
183 changed files with 5749 additions and 5063 deletions
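
The renames in the diff below follow a consistent pattern: fs.ConfigFileGet becomes config.FileGet, the fs.StringP/fs.BoolP/fs.VarP flag helpers move to fs/config/flags, the retry helpers (fs.ShouldRetry, fs.ShouldRetryHTTP, fs.IsRetryError) move to fs/fserrors, fs.HashType and fs.HashSet become hash.Type and hash.Set in fs/hash, fs.Config.Client() becomes fshttp.NewClient(fs.Config), fs.NewListRHelper moves to fs/walk, and fs.Stats moves to fs/accounting. As a quick orientation, here is a minimal sketch of a backend written against the new layout. Every call in it is taken from the hunks below; the example package, the example-mode flag and the newClient helper are invented for illustration only.

	// Package example is a hypothetical backend sketching the new fs layout.
	package example

	import (
		"net/http"

		"github.com/ncw/rclone/fs"
		"github.com/ncw/rclone/fs/config"
		"github.com/ncw/rclone/fs/config/flags"
		"github.com/ncw/rclone/fs/fserrors"
		"github.com/ncw/rclone/fs/fshttp"
		"github.com/ncw/rclone/fs/hash"

		"github.com/pkg/errors"
	)

	// Command line flags are registered via fs/config/flags (was fs.StringP).
	var exampleMode = flags.StringP("example-mode", "", "", "Hypothetical flag for this sketch.")

	// newClient reads backend options via fs/config (was fs.ConfigFileGet) and
	// builds an HTTP client via fs/fshttp (was fs.Config.Client()).
	func newClient(name string) (*http.Client, error) {
		account := config.FileGet(name, "account")
		if account == "" {
			return nil, errors.New("account not found")
		}
		return fshttp.NewClient(fs.Config), nil
	}

	// shouldRetry delegates to fs/fserrors (was fs.ShouldRetry).
	func shouldRetry(err error) (bool, error) {
		return fserrors.ShouldRetry(err), err
	}

	// Hashes reports MD5 support via fs/hash (was fs.HashSet(fs.HashMD5)).
	func Hashes() hash.Set {
		return hash.Set(hash.HashMD5)
	}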

View file

@@ -24,6 +24,11 @@ import (
"github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
@@ -46,7 +51,7 @@ const (
var (
// Flags
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
uploadWaitPerGB = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -73,20 +78,20 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Amazon Application Client Id - required.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret - required.",
}, {
Name: fs.ConfigAuthURL,
Name: config.ConfigAuthURL,
Help: "Auth server URL - leave blank to use Amazon's.",
}, {
Name: fs.ConfigTokenURL,
Name: config.ConfigTokenURL,
Help: "Token server url - leave blank to use Amazon's.",
}},
})
fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
// Fs represents a remote acd server
@@ -171,7 +176,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
return true, err
}
}
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// If query parameters contain X-Amz-Algorithm remove Authorization header
@@ -193,7 +198,7 @@ func filterRequest(req *http.Request) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
baseClient := fs.Config.Client()
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
@@ -212,7 +217,7 @@ func NewFs(name, root string) (fs.Fs, error) {
root: root,
c: c,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fs.Config.Client(),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -472,7 +477,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if iErr != nil {
return nil, iErr
}
if fs.IsRetryError(err) {
if fserrors.IsRetryError(err) {
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
continue
}
@@ -875,8 +880,8 @@ func (f *Fs) Precision() time.Duration {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// Copy src to this remote using server side copy operations.
@@ -932,9 +937,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
return *o.info.ContentProperties.Md5, nil

View file

@@ -11,7 +11,7 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
"hash"
gohash "hash"
"io"
"net/http"
"path"
@@ -23,6 +23,12 @@ import (
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
)
@@ -66,8 +72,8 @@ func init() {
},
},
})
fs.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
fs.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
}
// Fs represents a remote azure server
@@ -165,7 +171,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
}
}
}
return fs.ShouldRetry(err), err
return fserrors.ShouldRetry(err), err
}
// NewFs constructs an Fs from the path, container:path
@@ -180,11 +186,11 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
return nil, err
}
account := fs.ConfigFileGet(name, "account")
account := config.FileGet(name, "account")
if account == "" {
return nil, errors.New("account not found")
}
key := fs.ConfigFileGet(name, "key")
key := config.FileGet(name, "key")
if key == "" {
return nil, errors.New("key not found")
}
@@ -193,13 +199,13 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, errors.Errorf("malformed storage account key: %v", err)
}
endpoint := fs.ConfigFileGet(name, "endpoint", storage.DefaultBaseURL)
endpoint := config.FileGet(name, "endpoint", storage.DefaultBaseURL)
client, err := storage.NewClient(account, key, endpoint, apiVersion, true)
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage client")
}
client.HTTPClient = fs.Config.Client()
client.HTTPClient = fshttp.NewClient(fs.Config)
bc := client.GetBlobService()
f := &Fs{
@@ -473,7 +479,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.container == "" {
return fs.ErrorListBucketRequired
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
@@ -622,8 +628,8 @@ func (f *Fs) Precision() time.Duration {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// Purge deletes all the files and directories including the old versions.
@@ -690,9 +696,9 @@ func (o *Object) Remote() string {
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
// Convert base64 encoded md5 into lower case hex
if o.md5 == "" {
@@ -834,7 +840,7 @@ type openFile struct {
o *Object // Object we are reading for
resp *http.Response // response of the GET
body io.Reader // reading from here
hash hash.Hash // currently accumulating MD5
hash gohash.Hash // currently accumulating MD5
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
}
@@ -1059,7 +1065,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
size := src.Size()
blob := o.getBlobWithModTime(src.ModTime())
blob.Properties.ContentType = fs.MimeType(o)
if sourceMD5, _ := src.Hash(fs.HashMD5); sourceMD5 != "" {
if sourceMD5, _ := src.Hash(hash.HashMD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)

View file

@@ -7,7 +7,7 @@ import (
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
)
// Error describes a B2 error response
@@ -29,7 +29,7 @@ func (e *Error) Fatal() bool {
return e.Status == 403 // 403 errors shouldn't be retried
}
var _ fs.Fataler = (*Error)(nil)
var _ fserrors.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {

View file

@@ -9,7 +9,7 @@ import (
"bytes"
"crypto/sha1"
"fmt"
"hash"
gohash "hash"
"io"
"net/http"
"path"
@@ -21,6 +21,13 @@ import (
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
@@ -48,9 +55,9 @@ var (
minChunkSize = fs.SizeSuffix(5E6)
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(200E6)
b2TestMode = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2HardDelete = fs.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
@@ -72,8 +79,8 @@ func init() {
},
},
})
fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}
// Fs represents a remote b2 server
@@ -186,7 +193,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
}
return true, err
}
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// shouldRetry returns a boolean as to whether this resp and err
@@ -236,15 +243,15 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
return nil, err
}
account := fs.ConfigFileGet(name, "account")
account := config.FileGet(name, "account")
if account == "" {
return nil, errors.New("account not found")
}
key := fs.ConfigFileGet(name, "key")
key := config.FileGet(name, "key")
if key == "" {
return nil, errors.New("key not found")
}
endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
f := &Fs{
name: name,
bucket: bucket,
@@ -252,7 +259,7 @@ func NewFs(name, root string) (fs.Fs, error) {
account: account,
key: key,
endpoint: endpoint,
srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
bufferTokens: make(chan []byte, fs.Config.Transfers),
}
@@ -615,7 +622,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
last := ""
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
@@ -868,16 +875,16 @@ func (f *Fs) purge(oldOnly bool) error {
go func() {
defer wg.Done()
for object := range toBeDeleted {
fs.Stats.Checking(object.Name)
accounting.Stats.Checking(object.Name)
checkErr(f.deleteByID(object.ID, object.Name))
fs.Stats.DoneChecking(object.Name)
accounting.Stats.DoneChecking(object.Name)
}
}()
}
last := ""
checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
fs.Stats.Checking(remote)
accounting.Stats.Checking(remote)
if oldOnly && last != remote {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
@@ -890,7 +897,7 @@ func (f *Fs) purge(oldOnly bool) error {
toBeDeleted <- object
}
last = remote
fs.Stats.DoneChecking(remote)
accounting.Stats.DoneChecking(remote)
}
return nil
}))
@@ -914,8 +921,8 @@ func (f *Fs) CleanUp() error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashSHA1)
}
// ------------------------------------------------------------
@@ -939,9 +946,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashSHA1 {
return "", hash.ErrHashUnsupported
}
if o.sha1 == "" {
// Error is logged in readMetaData
@@ -1094,7 +1101,7 @@ type openFile struct {
o *Object // Object we are reading for
resp *http.Response // response of the GET
body io.Reader // reading from here
hash hash.Hash // currently accumulating SHA1
hash gohash.Hash // currently accumulating SHA1
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
}
@@ -1279,7 +1286,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
modTime := src.ModTime()
calculatedSha1, _ := src.Hash(fs.HashSHA1)
calculatedSha1, _ := src.Hash(hash.HashSHA1)
if calculatedSha1 == "" {
calculatedSha1 = "hex_digits_at_end"
har := newHashAppendingReader(in, sha1.New())

View file

@@ -9,19 +9,21 @@ import (
"crypto/sha1"
"encoding/hex"
"fmt"
"hash"
gohash "hash"
"io"
"strings"
"sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
type hashAppendingReader struct {
h hash.Hash
h gohash.Hash
in io.Reader
hexSum string
hexReader io.Reader
@@ -58,7 +60,7 @@ func (har *hashAppendingReader) HexSum() string {
// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
withHash := io.TeeReader(in, h)
return &hashAppendingReader{h: h, in: withHash}
}
@@ -113,7 +115,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
if calculatedSha1, err := src.Hash(hash.HashSHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
@@ -219,7 +221,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
opts := rest.Opts{
Method: "POST",
RootURL: upload.UploadURL,
Body: fs.AccountPart(up.o, in),
Body: accounting.AccountPart(up.o, in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
@@ -329,7 +331,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
accounting.AccountByPart(up.o) // Cancel whole file accounting before reading
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
@@ -390,7 +392,7 @@ func (up *largeUpload) Upload() error {
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
accounting.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors

View file

@@ -22,9 +22,11 @@ import (
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/box/api"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
@@ -56,7 +58,7 @@ var (
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
@@ -75,14 +77,14 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Box App Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Box App Client Secret - leave blank normally.",
}},
})
fs.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}
// Fs represents a remote box
@@ -160,7 +162,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRetry || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
@@ -827,8 +829,8 @@ func (f *Fs) DirCacheFlush() {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashSHA1)
}
// ------------------------------------------------------------
@@ -857,9 +859,9 @@ func (o *Object) srvPath() string {
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashSHA1 {
return "", hash.ErrHashUnsupported
}
return o.sha1, nil
}

View file

@@ -18,6 +18,10 @@ import (
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/time/rate"
@@ -47,18 +51,18 @@ const (
// Globals
var (
// Flags
cacheDbPath = fs.StringP("cache-db-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cache DB")
cacheChunkPath = fs.StringP("cache-chunk-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cached chunk files")
cacheDbPurge = fs.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
cacheChunkSize = fs.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
cacheTotalChunkSize = fs.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
cacheChunkCleanInterval = fs.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
cacheInfoAge = fs.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
cacheReadRetries = fs.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
cacheTotalWorkers = fs.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
cacheChunkNoMemory = fs.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
cacheRps = fs.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
cacheStoreWrites = fs.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
cacheDbPath = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
cacheChunkPath = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
cacheDbPurge = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
cacheChunkSize = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
cacheTotalChunkSize = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
cacheInfoAge = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
cacheReadRetries = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
cacheTotalWorkers = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
cacheChunkNoMemory = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
cacheRps = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
cacheStoreWrites = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
)
// Register with Fs
@@ -223,7 +227,7 @@ type Fs struct {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
remote := fs.ConfigFileGet(name, "remote")
remote := config.FileGet(name, "remote")
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
}
@@ -235,10 +239,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
}
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
plexURL := fs.ConfigFileGet(name, "plex_url")
plexToken := fs.ConfigFileGet(name, "plex_token")
plexURL := config.FileGet(name, "plex_url")
plexToken := config.FileGet(name, "plex_token")
var chunkSize fs.SizeSuffix
chunkSizeString := fs.ConfigFileGet(name, "chunk_size", DefCacheChunkSize)
chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
if *cacheChunkSize != DefCacheChunkSize {
chunkSizeString = *cacheChunkSize
}
@@ -247,7 +251,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString)
}
var chunkTotalSize fs.SizeSuffix
chunkTotalSizeString := fs.ConfigFileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
if *cacheTotalChunkSize != DefCacheTotalChunkSize {
chunkTotalSizeString = *cacheTotalChunkSize
}
@@ -260,7 +264,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
}
infoAge := fs.ConfigFileGet(name, "info_age", DefCacheInfoAge)
infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
if *cacheInfoAge != DefCacheInfoAge {
infoAge = *cacheInfoAge
}
@@ -301,10 +305,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
}
} else {
plexUsername := fs.ConfigFileGet(name, "plex_username")
plexPassword := fs.ConfigFileGet(name, "plex_password")
plexUsername := config.FileGet(name, "plex_username")
plexPassword := config.FileGet(name, "plex_password")
if plexPassword != "" && plexUsername != "" {
decPass, err := fs.Reveal(plexPassword)
decPass, err := config.Reveal(plexPassword)
if err != nil {
decPass = plexPassword
}
@@ -319,8 +323,8 @@ func NewFs(name, rpath string) (fs.Fs, error) {
dbPath := *cacheDbPath
chunkPath := *cacheChunkPath
// if the dbPath is non-default but the chunk path is default, we overwrite the latter so it follows dbPath
if dbPath != filepath.Join(fs.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(fs.CacheDir, "cache-backend") {
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
chunkPath = dbPath
}
if filepath.Ext(dbPath) != "" {
@@ -506,7 +510,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return cachedEntries, nil
}
func (f *Fs) recurse(dir string, list *fs.ListRHelper) error {
func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
entries, err := f.List(dir)
if err != nil {
return err
@@ -558,7 +562,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.recurse(dir, list)
if err != nil {
return err
@@ -895,7 +899,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
func (f *Fs) Hashes() hash.Set {
return f.Fs.Hashes()
}

View file

@@ -20,6 +20,8 @@ import (
//"strings"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/object"
//"github.com/ncw/rclone/cmd/mount"
//_ "github.com/ncw/rclone/cmd/cmount"
//"github.com/ncw/rclone/cmd/mountlib"
@@ -492,7 +494,7 @@ func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object
func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
modTime := time.Now()
objInfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
objInfo := object.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
obj, err := f.Put(in, objInfo)
require.NoError(t, err)
@@ -503,8 +505,8 @@ func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Obje
func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err := f.Put(in1, objInfo1)
require.NoError(t, err)
@@ -540,15 +542,15 @@ func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
cryptExists := false
for _, s := range fs.ConfigFileSections() {
for _, s := range config.FileSections() {
if s == localRemote {
localExists = true
}
@@ -563,28 +565,28 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
config.FileSet(cacheRemote, "type", "cache")
config.FileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
config.FileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
config.FileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
config.FileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
config.FileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
config.FileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
config.FileSet(cacheRemote, "info_age", infoAge.String())
}
if !cryptExists {
@@ -627,14 +629,14 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st
func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
for _, s := range fs.ConfigFileSections() {
for _, s := range config.FileSections() {
if s == localRemote {
localExists = true
}
@@ -646,28 +648,28 @@ func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[stri
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
config.FileSet(cacheRemote, "type", "cache")
config.FileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
config.FileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
config.FileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
config.FileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
config.FileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
config.FileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
config.FileSet(cacheRemote, "info_age", infoAge.String())
}
if c, ok := cfg["cache-chunk-no-memory"]; ok {

View file

@@ -13,21 +13,22 @@ import (
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
// Object is a generic file like object that stores basic information about it
type Object struct {
fs.Object `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashes map[fs.HashType]string // all supported hashes cached
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
}
@@ -80,10 +81,10 @@ func (o *Object) UnmarshalJSON(b []byte) error {
return err
}
o.cacheHashes = make(map[fs.HashType]string)
o.cacheHashes = make(map[hash.Type]string)
for k, v := range aux.Hashes {
ht, _ := strconv.Atoi(k)
o.cacheHashes[fs.HashType(ht)] = v
o.cacheHashes[hash.Type(ht)] = v
}
return nil
@@ -112,7 +113,7 @@ func (o *Object) updateData(source fs.Object) {
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashes = make(map[fs.HashType]string)
o.cacheHashes = make(map[hash.Type]string)
}
// Fs returns its FS info
@@ -251,7 +252,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.cacheHashes = make(map[fs.HashType]string)
o.cacheHashes = make(map[hash.Type]string)
o.persist()
return nil
@@ -274,9 +275,9 @@ func (o *Object) Remove() error {
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ht fs.HashType) (string, error) {
func (o *Object) Hash(ht hash.Type) (string, error) {
if o.cacheHashes == nil {
o.cacheHashes = make(map[fs.HashType]string)
o.cacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.cacheHashes[ht]

View file

@@ -13,6 +13,7 @@ import (
"sync"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
)
const (
@@ -107,8 +108,8 @@ func (p *plexConnector) authenticate() error {
}
p.token = token
if p.token != "" {
fs.ConfigFileSet(p.f.Name(), "plex_token", p.token)
fs.SaveConfig()
config.FileSet(p.f.Name(), "plex_token", p.token)
config.SaveConfig()
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}

View file

@@ -10,13 +10,16 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
var (
// Flags
cryptShowMapping = fs.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)
// Register with Fs
@@ -71,25 +74,25 @@ func init() {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
mode, err := NewNameEncryptionMode(fs.ConfigFileGet(name, "filename_encryption", "standard"))
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
if err != nil {
return nil, err
}
dirNameEncrypt, err := strconv.ParseBool(fs.ConfigFileGet(name, "directory_name_encryption", "true"))
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := fs.ConfigFileGet(name, "password", "")
password := config.FileGet(name, "password", "")
if password == "" {
return nil, errors.New("password not set in config file")
}
password, err = fs.Reveal(password)
password, err = config.Reveal(password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
salt := fs.ConfigFileGet(name, "password2", "")
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = fs.Reveal(salt)
salt, err = config.Reveal(salt)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
@@ -98,7 +101,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
remote := fs.ConfigFileGet(name, "remote")
remote := config.FileGet(name, "remote")
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
@@ -305,8 +308,8 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashNone)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashNone)
}
// Mkdir makes the directory (container, bucket)
@@ -459,7 +462,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash string, err error) {
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
in, err := o.Open()
if err != nil {
@@ -499,7 +502,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash s
}
// pipe into hash
m := fs.NewMultiHasher()
m := hash.NewMultiHasher()
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
@@ -558,8 +561,8 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash fs.HashType) (string, error) {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrHashUnsupported
}
// UnWrap returns the wrapped Object
@@ -652,7 +655,7 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash fs.HashType) (string, error) {
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}

View file

@@ -4,7 +4,7 @@ import (
"os"
"path/filepath"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -19,15 +19,15 @@ func init() {
fstests.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: fs.MustObscure("potato")},
{Name: name, Key: "password", Value: config.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name2, Key: "type", Value: "crypt"},
{Name: name2, Key: "remote", Value: tempdir2},
{Name: name2, Key: "password", Value: fs.MustObscure("potato2")},
{Name: name2, Key: "password", Value: config.MustObscure("potato2")},
{Name: name2, Key: "filename_encryption", Value: "off"},
{Name: name3, Key: "type", Value: "crypt"},
{Name: name3, Key: "remote", Value: tempdir3},
{Name: name3, Key: "password", Value: fs.MustObscure("potato2")},
{Name: name3, Key: "password", Value: config.MustObscure("potato2")},
{Name: name3, Key: "filename_encryption", Value: "obfuscate"},
}
fstests.SkipBadWindowsCharacters[name3+":"] = true

View file

@@ -21,11 +21,15 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v2"
@@ -46,13 +50,13 @@ const (
// Globals
var (
// Flags
driveAuthOwnerOnly = fs.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
driveUseTrash = fs.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
driveSkipGdocs = fs.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
driveSharedWithMe = fs.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
driveTrashedOnly = fs.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
driveExtensions = fs.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
driveListChunk = pflag.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
@@ -62,7 +66,7 @@ var (
Scopes: []string{"https://www.googleapis.com/auth/drive"},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
mimeTypeToExtension = map[string]string{
@@ -99,7 +103,7 @@ func init() {
NewFs: NewFs,
Config: func(name string) {
var err error
if fs.ConfigFileGet(name, "service_account_file") == "" {
if config.FileGet(name, "service_account_file") == "" {
err = oauthutil.Config("drive", name, driveConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
@@ -111,18 +115,18 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
}},
})
fs.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
fs.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
// Invert mimeTypeToExtension
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
@@ -185,7 +189,7 @@ func (f *Fs) Features() *fs.Features {
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fs.ShouldRetry(err) {
if fserrors.ShouldRetry(err) {
again = true
} else {
switch gerr := err.(type) {
@@ -337,13 +341,13 @@ func (f *Fs) parseExtensions(extensions string) error {
// Figure out if the user wants to use a team drive
func configTeamDrive(name string) error {
teamDrive := fs.ConfigFileGet(name, "team_drive")
teamDrive := config.FileGet(name, "team_drive")
if teamDrive == "" {
fmt.Printf("Configure this as a team drive?\n")
} else {
fmt.Printf("Change current team drive ID %q?\n", teamDrive)
}
if !fs.Confirm() {
if !config.Confirm() {
return nil
}
client, err := authenticate(name)
@@ -379,9 +383,9 @@ func configTeamDrive(name string) error {
if len(driveIDs) == 0 {
fmt.Printf("No team drives found in your account")
} else {
driveID = fs.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
}
fs.ConfigFileSet(name, "team_drive", driveID)
config.FileSet(name, "team_drive", driveID)
return nil
}
@@ -399,7 +403,7 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fs.Config.Client())
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -407,7 +411,7 @@ func authenticate(name string) (*http.Client, error) {
var oAuthClient *http.Client
var err error
serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
serviceAccountPath := config.FileGet(name, "service_account_file")
if serviceAccountPath != "" {
oAuthClient, err = getServiceAccountClient(serviceAccountPath)
if err != nil {
@@ -444,7 +448,7 @@ func NewFs(name, path string) (fs.Fs, error) {
root: root,
pacer: newPacer(),
}
f.teamDriveID = fs.ConfigFileGet(name, "team_drive")
f.teamDriveID = config.FileGet(name, "team_drive")
f.isTeamDrive = f.teamDriveID != ""
f.features = (&fs.Features{
DuplicateFiles: true,
@@ -1188,8 +1192,8 @@ func (f *Fs) DirCacheFlush() {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// ------------------------------------------------------------
@@ -1213,9 +1217,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
return o.md5sum, nil
}

View file

@@ -20,6 +20,8 @@ import (
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/api/drive/v2"
"google.golang.org/api/googleapi"
@@ -201,7 +203,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
chunk := fs.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
@@ -241,7 +243,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}

View file

@@ -34,8 +34,13 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@@ -59,7 +64,7 @@ var (
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
@@ -112,7 +117,7 @@ func init() {
Help: "Dropbox App Secret - leave blank normally.",
}},
})
fs.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}
// Fs represents a remote dropbox server
@@ -170,7 +175,7 @@ func shouldRetry(err error) (bool, error) {
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
return fs.ShouldRetry(err), err
return fserrors.ShouldRetry(err), err
}
// NewFs constructs an Fs from the path, container:path
@@ -181,11 +186,11 @@ func NewFs(name, root string) (fs.Fs, error) {
// Convert the old token if it exists. The old token was
// just a string, the new one is a JSON blob
oldToken := strings.TrimSpace(fs.ConfigFileGet(name, fs.ConfigToken))
oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
if oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := fs.ConfigSetValueAndSave(name, fs.ConfigToken, newToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, errors.Wrap(err, "NewFS convert token")
}
@@ -675,8 +680,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashDropbox)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashDropbox)
}
// ------------------------------------------------------------
@@ -700,9 +705,9 @@ func (o *Object) Remote() string {
}
// Hash returns the dropbox special hash
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashDropbox {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashDropbox {
return "", hash.ErrHashUnsupported
}
err := o.readMetaData()
if err != nil {
@@ -813,7 +818,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
case files.DownloadAPIError:
// Don't attempt to retry copyright violation errors
if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
return nil, fs.NoRetryError(err)
return nil, fserrors.NoRetryError(err)
}
}
@@ -831,7 +836,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := fs.NewCountingReader(in0)
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
@@ -847,7 +852,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
chunk := fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, 0); err != nil {
@@ -883,7 +888,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
}
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, 0); err != nil {
@@ -906,7 +911,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
Commit: commitInfo,
}
fmtChunk(currentChunk, true)
chunk = fs.NewRepeatableReaderBuffer(in, buf)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, 0); err != nil {

View file

@@ -13,6 +13,8 @@ import (
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
@@ -160,33 +162,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
func NewFs(name, root string) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// FIXME Convert the old scheme used for the first beta - remove after release
if ftpURL := fs.ConfigFileGet(name, "url"); ftpURL != "" {
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
fs.Infof(name, "Converting old configuration")
u, err := url.Parse(ftpURL)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
}
parts := strings.Split(u.Host, ":")
fs.ConfigFileSet(name, "host", parts[0])
config.FileSet(name, "host", parts[0])
if len(parts) > 1 {
fs.ConfigFileSet(name, "port", parts[1])
config.FileSet(name, "port", parts[1])
}
fs.ConfigFileSet(name, "host", u.Host)
fs.ConfigFileSet(name, "user", fs.ConfigFileGet(name, "username"))
fs.ConfigFileSet(name, "pass", fs.ConfigFileGet(name, "password"))
fs.ConfigFileDeleteKey(name, "username")
fs.ConfigFileDeleteKey(name, "password")
fs.ConfigFileDeleteKey(name, "url")
fs.SaveConfig()
config.FileSet(name, "host", u.Host)
config.FileSet(name, "user", config.FileGet(name, "username"))
config.FileSet(name, "pass", config.FileGet(name, "password"))
config.FileDeleteKey(name, "username")
config.FileDeleteKey(name, "password")
config.FileDeleteKey(name, "url")
config.SaveConfig()
if u.Path != "" && u.Path != "/" {
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
}
}
host := fs.ConfigFileGet(name, "host")
user := fs.ConfigFileGet(name, "user")
pass := fs.ConfigFileGet(name, "pass")
port := fs.ConfigFileGet(name, "port")
pass, err = fs.Reveal(pass)
host := config.FileGet(name, "host")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
port := config.FileGet(name, "port")
pass, err = config.Reveal(pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
@@ -346,7 +348,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
}
// Hashes are not supported
func (f *Fs) Hashes() fs.HashSet {
func (f *Fs) Hashes() hash.Set {
return 0
}
@@ -565,8 +567,8 @@ func (o *Object) Remote() string {
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrHashUnsupported
}
// Size returns the size of an object in bytes

View file

@@ -28,6 +28,11 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/pkg/errors"
"golang.org/x/oauth2"
@@ -46,14 +51,14 @@ const (
)
var (
gcsLocation = fs.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
gcsStorageClass = fs.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
@ -65,7 +70,7 @@ func init() {
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string) {
if fs.ConfigFileGet(name, "service_account_file") != "" {
if config.FileGet(name, "service_account_file") != "" {
return
}
err := oauthutil.Config("google cloud storage", name, storageConfig)
@ -74,10 +79,10 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
@ -280,7 +285,7 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fs.Config.Client())
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@ -289,7 +294,7 @@ func NewFs(name, root string) (fs.Fs, error) {
var oAuthClient *http.Client
var err error
serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
serviceAccountPath := config.FileGet(name, "service_account_file")
if serviceAccountPath != "" {
oAuthClient, err = getServiceAccountClient(serviceAccountPath)
if err != nil {
@ -311,11 +316,11 @@ func NewFs(name, root string) (fs.Fs, error) {
name: name,
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFileGet(name, "project_number"),
objectACL: fs.ConfigFileGet(name, "object_acl"),
bucketACL: fs.ConfigFileGet(name, "bucket_acl"),
location: fs.ConfigFileGet(name, "location"),
storageClass: fs.ConfigFileGet(name, "storage_class"),
projectNumber: config.FileGet(name, "project_number"),
objectACL: config.FileGet(name, "object_acl"),
bucketACL: config.FileGet(name, "bucket_acl"),
location: config.FileGet(name, "location"),
storageClass: config.FileGet(name, "storage_class"),
}
f.features = (&fs.Features{
ReadMimeType: true,
@ -538,7 +543,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
@ -669,8 +674,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// ------------------------------------------------------------
@ -694,9 +699,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
return o.md5sum, nil
}
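ListR implementations now take their batching helper from the new fs/walk package instead of fs. A minimal sketch of the pattern in the gcs hunk above; listAll is a hypothetical stand-in for the backend's paged lister, and the closing Flush call follows the helper's use elsewhere in rclone rather than anything shown in this hunk:

package example

import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/walk"
)

// listR buffers entries and delivers them to the callback in batches.
func listR(callback fs.ListRCallback, listAll func(fn func(fs.DirEntry) error) error) error {
    list := walk.NewListRHelper(callback) // was fs.NewListRHelper
    err := listAll(func(entry fs.DirEntry) error {
        return list.Add(entry)
    })
    if err != nil {
        return err
    }
    return list.Flush() // assumed: emit any buffered entries
}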

View file

@ -17,6 +17,9 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/net/html"
@ -79,7 +82,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
endpoint := fs.ConfigFileGet(name, "url")
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
@ -94,7 +97,7 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, err
}
client := fs.Config.Client()
client := fshttp.NewClient(fs.Config)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
@ -363,8 +366,8 @@ func (o *Object) Remote() string {
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(r fs.HashType) (string, error) {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrHashUnsupported
}
// Size returns the size in bytes of the remote http file
@ -434,9 +437,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
return res.Body, nil
}
// Hashes returns fs.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashNone)
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashNone)
}
// Mkdir makes the root directory of the Fs object
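The http backend shows the two smallest pieces of the split side by side: configuration comes from fs/config and the HTTP client from fs/fshttp, so the backend no longer calls methods on fs.Config for either. A condensed sketch of the NewFs wiring above:

package example

import (
    "net/http"
    "net/url"
    "strings"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/fshttp"
)

// endpointAndClient normalises the configured URL and builds the client.
func endpointAndClient(name string) (*url.URL, *http.Client, error) {
    endpoint := config.FileGet(name, "url") // was fs.ConfigFileGet
    if !strings.HasSuffix(endpoint, "/") {
        endpoint += "/"
    }
    u, err := url.Parse(endpoint)
    if err != nil {
        return nil, nil, err
    }
    return u, fshttp.NewClient(fs.Config), nil // was fs.Config.Client()
}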

View file

@ -15,6 +15,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
@ -36,12 +37,12 @@ func prepareServer(t *testing.T) func() {
ts := httptest.NewServer(fileServer)
// Configure the remote
fs.LoadConfig()
config.LoadConfig()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
fs.ConfigFileSet(remoteName, "type", "http")
fs.ConfigFileSet(remoteName, "url", ts.URL)
config.FileSet(remoteName, "type", "http")
config.FileSet(remoteName, "url", ts.URL)
// return a function to tidy up
return ts.Close
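Tests migrate the same way: config.LoadConfig replaces fs.LoadConfig and the remote is defined entirely through config.FileSet. A cut-down sketch of prepareServer above; the file server root is illustrative:

package example

import (
    "net/http"
    "net/http/httptest"

    "github.com/ncw/rclone/fs/config"
)

// prepareRemote points an in-memory "http" remote at a test server and
// returns the tidy-up function.
func prepareRemote(remoteName string) func() {
    ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) // root is illustrative
    config.LoadConfig()                        // was fs.LoadConfig
    config.FileSet(remoteName, "type", "http") // was fs.ConfigFileSet
    config.FileSet(remoteName, "url", ts.URL)
    return ts.Close
}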

View file

@ -15,9 +15,9 @@ import (
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/swift"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"golang.org/x/oauth2"
@ -40,7 +40,7 @@ var (
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
@ -58,10 +58,10 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Hubic Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret - leave blank normally.",
}},
})
@ -159,7 +159,7 @@ func NewFs(name, root string) (fs.Fs, error) {
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
Transport: fshttp.NewTransport(fs.Config),
}
err = c.Authenticate()
if err != nil {
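Where a backend needs a raw transport rather than a whole client, fshttp.NewTransport(fs.Config) replaces the fs.Config.Transport() method, as in the swift connection above. A sketch with the credential and auth fields elided:

package example

import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fshttp"
    swiftLib "github.com/ncw/swift"
)

// newConnection shows only the timeout and transport wiring.
func newConnection() *swiftLib.Connection {
    return &swiftLib.Connection{
        ConnectTimeout: 10 * fs.Config.ConnectTimeout, // use the timeouts in the transport
        Timeout:        10 * fs.Config.Timeout,
        Transport:      fshttp.NewTransport(fs.Config), // was fs.Config.Transport()
    }
}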

View file

@ -16,14 +16,17 @@ import (
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
"google.golang.org/appengine/log"
)
var (
followSymlinks = fs.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
skipSymlinks = fs.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
noUTFNorm = fs.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
)
// Constants
@ -72,7 +75,7 @@ type Object struct {
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[fs.HashType]string // Hashes
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
@ -85,7 +88,7 @@ func NewFs(name, root string) (fs.Fs, error) {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
nounc := fs.ConfigFileGet(name, "nounc")
nounc := config.FileGet(name, "nounc")
f := &Fs{
name: name,
warned: make(map[string]struct{}),
@ -532,8 +535,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.SupportedHashes
func (f *Fs) Hashes() hash.Set {
return hash.SupportedHashes
}
// ------------------------------------------------------------
@ -557,7 +560,7 @@ func (o *Object) Remote() string {
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r fs.HashType) (string, error) {
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
@ -571,12 +574,12 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
}
if o.hashes == nil {
o.hashes = make(map[fs.HashType]string)
o.hashes = make(map[hash.Type]string)
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
o.hashes, err = fs.HashStream(in)
o.hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
@ -641,9 +644,9 @@ func (o *Object) Storable() bool {
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *fs.MultiHasher // currently accumulating hashes
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
}
// Read bytes from the object - see io.Reader
@ -670,7 +673,7 @@ func (file *localOpenFile) Close() (err error) {
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset int64
hashes := fs.SupportedHashes
hashes := hash.SupportedHashes
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@ -694,7 +697,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// don't attempt to make checksums
return fd, err
}
hash, err := fs.NewMultiHasherTypes(hashes)
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
@ -715,7 +718,7 @@ func (o *Object) mkdirAll() error {
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := fs.SupportedHashes
hashes := hash.SupportedHashes
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
@ -734,7 +737,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Calculate the hash of the object we are reading as we go along
hash, err := fs.NewMultiHasherTypes(hashes)
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
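All of the hashing types move from fs to fs/hash: fs.HashType becomes hash.Type, fs.HashStream becomes hash.Stream, and fs.NewMultiHasherTypes becomes hash.NewMultiHasherTypes. A minimal sketch; that MultiHasher satisfies io.Writer is inferred from localOpenFile above, which feeds it the bytes it reads, and is an assumption here:

package example

import (
    "io"

    "github.com/ncw/rclone/fs/hash"
)

// hashAll hashes everything a reader yields, as local's Hash does.
func hashAll(r io.Reader) (map[hash.Type]string, error) {
    return hash.Stream(r) // was fs.HashStream
}

// teeHashes accumulates hashes as data streams past, as Open does.
func teeHashes(r io.Reader) (io.Reader, *hash.MultiHasher, error) {
    hasher, err := hash.NewMultiHasherTypes(hash.SupportedHashes) // was fs.NewMultiHasherTypes
    if err != nil {
        return nil, nil, err
    }
    return io.TeeReader(r, hasher), hasher, nil // assumes MultiHasher is an io.Writer
}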

View file

@ -9,10 +9,11 @@ import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)
var (
oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)
// readDevice turns a valid os.FileInfo into a device number,

View file

@ -15,16 +15,16 @@ import (
"time"
"github.com/ncw/rclone/backend/onedrive/api"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/onedrive/api"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@ -56,7 +56,7 @@ var (
TokenURL: "https://login.live.com/oauth20_token.srf",
},
ClientID: rclonePersonalClientID,
ClientSecret: fs.MustReveal(rclonePersonalEncryptedClientSecret),
ClientSecret: config.MustReveal(rclonePersonalEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
@ -67,7 +67,7 @@ var (
TokenURL: "https://login.microsoftonline.com/common/oauth2/token",
},
ClientID: rcloneBusinessClientID,
ClientSecret: fs.MustReveal(rcloneBusinessEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneBusinessEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
@ -87,7 +87,7 @@ func init() {
fmt.Printf("Choose OneDrive account type?\n")
fmt.Printf(" * Say b for a OneDrive business account\n")
fmt.Printf(" * Say p for a personal OneDrive account\n")
isPersonal := fs.Command([]string{"bBusiness", "pPersonal"}) == 'p'
isPersonal := config.Command([]string{"bBusiness", "pPersonal"}) == 'p'
if isPersonal {
// for personal accounts we don't save a field about the account
@ -103,7 +103,7 @@ func init() {
}
// Are we running headless?
if fs.ConfigFileGet(name, fs.ConfigAutomatic) != "" {
if config.FileGet(name, config.ConfigAutomatic) != "" {
// Yes, okay we are done
return
}
@ -159,10 +159,10 @@ func init() {
} else if len(resourcesID) == 1 {
foundService = resourcesID[0]
} else {
foundService = fs.Choose("Choose resource URL", resourcesID, resourcesURL, false)
foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
}
fs.ConfigFileSet(name, configResourceURL, foundService)
config.FileSet(name, configResourceURL, foundService)
oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
// get the token from the initial config
@ -218,16 +218,16 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Microsoft App Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Microsoft App Client Secret - leave blank normally.",
}},
})
fs.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
fs.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
flags.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}
// Fs represents a remote one drive
@ -306,7 +306,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
authRety = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRety || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// readMetaDataForPath reads the metadata from the path
@ -339,7 +339,7 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
// get the resource URL from the config file
resourceURL := fs.ConfigFileGet(name, configResourceURL, "")
resourceURL := config.FileGet(name, configResourceURL, "")
// if we have a resource URL it's a business account otherwise a personal one
var rootURL string
var oauthConfig *oauth2.Config
@ -743,10 +743,10 @@ func (f *Fs) waitForJob(location string, o *Object) error {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
if err != nil {
return fs.ShouldRetry(err), err
return fserrors.ShouldRetry(err), err
}
body, err = rest.ReadBody(resp)
return fs.ShouldRetry(err), err
return fserrors.ShouldRetry(err), err
})
if err != nil {
return err
@ -915,8 +915,8 @@ func (f *Fs) DirCacheFlush() {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashSHA1)
}
// ------------------------------------------------------------
@ -945,9 +945,9 @@ func (o *Object) srvPath() string {
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashSHA1 {
return "", hash.ErrHashUnsupported
}
return o.sha1, nil
}
@ -1161,7 +1161,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
if remaining < n {
n = remaining
}
seg := fs.NewRepeatableReader(io.LimitReader(in, n))
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
err = o.uploadFragment(uploadURL, position, size, seg, n)
if err != nil {
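Retry classification becomes its own package too: fs.ShouldRetry, fs.ShouldRetryHTTP, fs.IsRetryError and fs.FatalError now live in fs/fserrors. The shouldRetry shape shared by the backends above, as a standalone sketch with illustrative status codes:

package example

import (
    "net/http"

    "github.com/ncw/rclone/fs/fserrors"
)

// retryErrorCodes is illustrative; each backend defines its own list.
var retryErrorCodes = []int{429, 500, 502, 503, 504}

// shouldRetry reports whether this response/error pair deserves a retry.
func shouldRetry(resp *http.Response, err error) (bool, error) {
    // was fs.ShouldRetry / fs.ShouldRetryHTTP
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}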

View file

@ -22,16 +22,15 @@ import (
"time"
"github.com/ncw/rclone/backend/pcloud/api"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/pcloud/api"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@ -56,7 +55,7 @@ var (
TokenURL: "https://api.pcloud.com/oauth2_token",
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
@ -75,14 +74,14 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Pcloud App Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret - leave blank normally.",
}},
})
fs.VarP(&uploadCutoff, "pcloud-upload-cutoff", "", "Cutoff for switching to multipart upload")
flags.VarP(&uploadCutoff, "pcloud-upload-cutoff", "", "Cutoff for switching to multipart upload")
}
// Fs represents a remote pcloud
@ -174,7 +173,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
doRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
return doRetry || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for pcloud
@ -812,8 +811,8 @@ func (f *Fs) DirCacheFlush() {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5 | fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5 | hash.HashSHA1)
}
// ------------------------------------------------------------
@ -859,9 +858,9 @@ func (o *Object) getHashes() (err error) {
}
// Hash returns the MD5 or SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 && t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 && t != hash.HashSHA1 {
return "", hash.ErrHashUnsupported
}
if o.md5 == "" && o.sha1 == "" {
err := o.getHashes()
@ -869,7 +868,7 @@ func (o *Object) Hash(t fs.HashType) (string, error) {
return "", errors.Wrap(err, "failed to get hash")
}
}
if t == fs.HashMD5 {
if t == hash.HashMD5 {
return o.md5, nil
}
return o.sha1, nil
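A backend offering more than one hash declares them as a bitmask in Hashes() and dispatches on the type in Hash(), exactly as pcloud does above. Condensed into a standalone sketch with a hypothetical object type:

package example

import "github.com/ncw/rclone/fs/hash"

type object struct{ md5, sha1 string }

// hashes mirrors pcloud's Hashes() above.
func hashes() hash.Set {
    return hash.Set(hash.HashMD5 | hash.HashSHA1)
}

// checksum mirrors pcloud's Hash() dispatch above.
func (o *object) checksum(t hash.Type) (string, error) {
    switch t {
    case hash.HashMD5:
        return o.md5, nil
    case hash.HashSHA1:
        return o.sha1, nil
    }
    return "", hash.ErrHashUnsupported
}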

View file

@ -17,8 +17,12 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/yunify/qingstor-sdk-go/config"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
@ -162,11 +166,11 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
// qsConnection makes a connection to qingstor
func qsServiceConnection(name string) (*qs.Service, error) {
accessKeyID := fs.ConfigFileGet(name, "access_key_id")
secretAccessKey := fs.ConfigFileGet(name, "secret_access_key")
accessKeyID := config.FileGet(name, "access_key_id")
secretAccessKey := config.FileGet(name, "secret_access_key")
switch {
case fs.ConfigFileGetBool(name, "env_auth", false):
case config.FileGetBool(name, "env_auth", false):
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@ -180,7 +184,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
host := "qingstor.com"
port := 443
endpoint := fs.ConfigFileGet(name, "endpoint", "")
endpoint := config.FileGet(name, "endpoint", "")
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
@ -201,19 +205,19 @@ func qsServiceConnection(name string) (*qs.Service, error) {
}
connectionRetries := 3
retries := fs.ConfigFileGet(name, "connection_retries", "")
retries := config.FileGet(name, "connection_retries", "")
if retries != "" {
connectionRetries, _ = strconv.Atoi(retries)
}
cf, err := config.NewDefault()
cf, err := qsConfig.NewDefault()
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = connectionRetries
cf.Connection = fs.Config.Client()
cf.Connection = fshttp.NewClient(fs.Config)
svc, _ := qs.Init(cf)
@ -231,7 +235,7 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, err
}
zone := fs.ConfigFileGet(name, "zone")
zone := config.FileGet(name, "zone")
if zone == "" {
zone = "pek3a"
}
@ -302,9 +306,9 @@ func (f *Fs) Precision() time.Duration {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
//return fs.HashSet(fs.HashNone)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
//return hash.Set(hash.HashNone)
}
// Features returns the optional features of this Fs
@ -591,7 +595,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
@ -925,9 +929,9 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
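One knock-on effect of the new fs/config package shows up in qingstor: the SDK also exports a package named config, so the SDK import gains a qsConfig alias and its call sites are rewritten. A sketch of the two packages side by side; the *qsConfig.Config return type is assumed from the NewDefault call above:

package example

import (
    "github.com/ncw/rclone/fs/config"                   // rclone's relocated helpers
    qsConfig "github.com/yunify/qingstor-sdk-go/config" // aliased to avoid the clash
)

// newSDKConfig uses both packages in one body, which is why the alias exists.
func newSDKConfig(name string) (*qsConfig.Config, error) {
    cf, err := qsConfig.NewDefault()
    if err != nil {
        return nil, err
    }
    cf.AccessKeyID = config.FileGet(name, "access_key_id")
    cf.SecretAccessKey = config.FileGet(name, "secret_access_key")
    return cf, nil
}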

View file

@ -37,6 +37,11 @@ import (
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/swift"
"github.com/pkg/errors"
@ -233,8 +238,8 @@ const (
// Globals
var (
// Flags
s3ACL = fs.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
s3StorageClass = fs.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
)
// Fs represents a remote s3 server
@ -316,9 +321,9 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
func s3Connection(name string) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: fs.ConfigFileGet(name, "access_key_id"),
SecretAccessKey: fs.ConfigFileGet(name, "secret_access_key"),
SessionToken: fs.ConfigFileGet(name, "session_token"),
AccessKeyID: config.FileGet(name, "access_key_id"),
SecretAccessKey: config.FileGet(name, "secret_access_key"),
SessionToken: config.FileGet(name, "session_token"),
}
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
@ -348,7 +353,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
cred := credentials.NewChainCredentials(providers)
switch {
case fs.ConfigFileGetBool(name, "env_auth", false):
case config.FileGetBool(name, "env_auth", false):
// No need for empty checks if "env_auth" is true
case v.AccessKeyID == "" && v.SecretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@ -359,8 +364,8 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
return nil, nil, errors.New("secret_access_key not found")
}
endpoint := fs.ConfigFileGet(name, "endpoint")
region := fs.ConfigFileGet(name, "region")
endpoint := config.FileGet(name, "endpoint")
region := config.FileGet(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
}
@ -372,7 +377,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
ses := session.New()
@ -408,11 +413,11 @@ func NewFs(name, root string) (fs.Fs, error) {
c: c,
bucket: bucket,
ses: ses,
acl: fs.ConfigFileGet(name, "acl"),
acl: config.FileGet(name, "acl"),
root: directory,
locationConstraint: fs.ConfigFileGet(name, "location_constraint"),
sse: fs.ConfigFileGet(name, "server_side_encryption"),
storageClass: fs.ConfigFileGet(name, "storage_class"),
locationConstraint: config.FileGet(name, "location_constraint"),
sse: config.FileGet(name, "server_side_encryption"),
storageClass: config.FileGet(name, "storage_class"),
}
f.features = (&fs.Features{
ReadMimeType: true,
@ -657,7 +662,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
@ -804,8 +809,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// ------------------------------------------------------------
@ -831,9 +836,9 @@ func (o *Object) Remote() string {
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
hash := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
@ -1027,7 +1032,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
if size > uploader.PartSize {
hash, err := src.Hash(fs.HashMD5)
hash, err := src.Hash(hash.HashMD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
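The tail of this hunk is why matchMd5 exists: multipart uploads give S3 objects etags that are not plain md5sums, so the source hash is decoded only when it looks like one. Condensed, with the hex decode completing the logic visible above:

package example

import (
    "encoding/hex"
    "regexp"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/hash"
)

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// md5Bytes returns the source object's MD5 as raw bytes, or false when
// the hash is unavailable or is not a plain hex md5sum.
func md5Bytes(src fs.ObjectInfo) ([]byte, bool) {
    sum, err := src.Hash(hash.HashMD5) // was src.Hash(fs.HashMD5)
    if err != nil || !matchMd5.MatchString(sum) {
        return nil, false
    }
    b, err := hex.DecodeString(sum)
    return b, err == nil
}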

View file

@ -16,6 +16,9 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/xanzy/ssh-agent"
@ -94,7 +97,7 @@ type Fs struct {
port string
url string
mkdirLock *stringLock
cachedHashes *fs.HashSet
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
@ -134,13 +137,13 @@ func readCurrentUser() (userName string) {
// Dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fs.Config.NewDialer()
func Dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(fs.Config)
conn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
if err != nil {
return nil, err
}
@ -263,19 +266,19 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
user := fs.ConfigFileGet(name, "user")
host := fs.ConfigFileGet(name, "host")
port := fs.ConfigFileGet(name, "port")
pass := fs.ConfigFileGet(name, "pass")
keyFile := fs.ConfigFileGet(name, "key_file")
insecureCipher := fs.ConfigFileGetBool(name, "use_insecure_cipher")
user := config.FileGet(name, "user")
host := config.FileGet(name, "host")
port := config.FileGet(name, "port")
pass := config.FileGet(name, "pass")
keyFile := config.FileGet(name, "key_file")
insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
if user == "" {
user = currentUser
}
if port == "" {
port = "22"
}
config := &ssh.ClientConfig{
sshConfig := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
@ -283,8 +286,8 @@ func NewFs(name, root string) (fs.Fs, error) {
}
if insecureCipher {
config.Config.SetDefaults()
config.Config.Ciphers = append(config.Config.Ciphers, "aes128-cbc")
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
}
// Add ssh agent-auth if no password or file specified
@ -297,7 +300,7 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't read ssh agent signers")
}
config.Auth = append(config.Auth, ssh.PublicKeys(signers...))
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signers...))
}
// Load key file if specified
@ -310,22 +313,22 @@ func NewFs(name, root string) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key file")
}
config.Auth = append(config.Auth, ssh.PublicKeys(signer))
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
}
// Auth from password if specified
if pass != "" {
clearpass, err := fs.Reveal(pass)
clearpass, err := config.Reveal(pass)
if err != nil {
return nil, err
}
config.Auth = append(config.Auth, ssh.Password(clearpass))
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
f := &Fs{
name: name,
root: root,
config: config,
config: sshConfig,
host: host,
port: port,
url: "sftp://" + user + "@" + host + ":" + port + "/" + root,
@ -631,25 +634,25 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() fs.HashSet {
func (f *Fs) Hashes() hash.Set {
if f.cachedHashes != nil {
return *f.cachedHashes
}
hashcheckDisabled := fs.ConfigFileGetBool(f.name, "disable_hashcheck")
hashcheckDisabled := config.FileGetBool(f.name, "disable_hashcheck")
if hashcheckDisabled {
return fs.HashSet(fs.HashNone)
return hash.Set(hash.HashNone)
}
c, err := f.getSftpConnection()
if err != nil {
fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
return fs.HashSet(fs.HashNone)
return hash.Set(hash.HashNone)
}
defer f.putSftpConnection(&c, err)
session, err := c.sshClient.NewSession()
if err != nil {
return fs.HashSet(fs.HashNone)
return hash.Set(hash.HashNone)
}
sha1Output, _ := session.Output("echo 'abc' | sha1sum")
expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
@ -657,7 +660,7 @@ func (f *Fs) Hashes() fs.HashSet {
session, err = c.sshClient.NewSession()
if err != nil {
return fs.HashSet(fs.HashNone)
return hash.Set(hash.HashNone)
}
md5Output, _ := session.Output("echo 'abc' | md5sum")
expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
@ -666,15 +669,15 @@ func (f *Fs) Hashes() fs.HashSet {
sha1Works := parseHash(sha1Output) == expectedSha1
md5Works := parseHash(md5Output) == expectedMd5
set := fs.NewHashSet()
set := hash.NewHashSet()
if !sha1Works && !md5Works {
set.Add(fs.HashNone)
set.Add(hash.HashNone)
}
if sha1Works {
set.Add(fs.HashSHA1)
set.Add(hash.HashSHA1)
}
if md5Works {
set.Add(fs.HashMD5)
set.Add(hash.HashMD5)
}
_ = session.Close()
@ -702,10 +705,10 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(r fs.HashType) (string, error) {
if r == fs.HashMD5 && o.md5sum != nil {
func (o *Object) Hash(r hash.Type) (string, error) {
if r == hash.HashMD5 && o.md5sum != nil {
return *o.md5sum, nil
} else if r == fs.HashSHA1 && o.sha1sum != nil {
} else if r == hash.HashSHA1 && o.sha1sum != nil {
return *o.sha1sum, nil
}
@ -717,29 +720,29 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
o.fs.putSftpConnection(&c, err)
if err != nil {
o.fs.cachedHashes = nil // Something has changed on the remote system
return "", fs.ErrHashUnsupported
return "", hash.ErrHashUnsupported
}
err = fs.ErrHashUnsupported
err = hash.ErrHashUnsupported
var outputBytes []byte
escapedPath := shellEscape(o.path())
if r == fs.HashMD5 {
if r == hash.HashMD5 {
outputBytes, err = session.Output("md5sum " + escapedPath)
} else if r == fs.HashSHA1 {
} else if r == hash.HashSHA1 {
outputBytes, err = session.Output("sha1sum " + escapedPath)
}
if err != nil {
o.fs.cachedHashes = nil // Something has changed on the remote system
_ = session.Close()
return "", fs.ErrHashUnsupported
return "", hash.ErrHashUnsupported
}
_ = session.Close()
str := parseHash(outputBytes)
if r == fs.HashMD5 {
if r == hash.HashMD5 {
o.md5sum = &str
} else if r == fs.HashSHA1 {
} else if r == hash.HashSHA1 {
o.sha1sum = &str
}
return str, nil
@ -812,7 +815,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
if err != nil {
return errors.Wrap(err, "SetModTime")
}
if fs.ConfigFileGetBool(o.fs.name, "set_modtime", true) {
if config.FileGetBool(o.fs.name, "set_modtime", true) {
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
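Most of the sftp diff is a rename forced by the new import: a local variable called config would shadow the fs/config package, so it becomes sshConfig throughout. A sketch of the password path, assuming nothing beyond what the hunks above show:

package example

import (
    "github.com/ncw/rclone/fs/config"
    "golang.org/x/crypto/ssh"
)

// newSSHConfig needs the config package and the ssh client settings in
// the same scope, which is why the local variable had to be renamed.
func newSSHConfig(name, user string) (*ssh.ClientConfig, error) {
    sshConfig := &ssh.ClientConfig{
        User:            user,
        Auth:            []ssh.AuthMethod{},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
    }
    if pass := config.FileGet(name, "pass"); pass != "" {
        clearpass, err := config.Reveal(pass)
        if err != nil {
            return nil, err
        }
        sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
    }
    return sshConfig, nil
}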

View file

@ -14,6 +14,13 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/swift"
"github.com/pkg/errors"
)
@ -118,7 +125,7 @@ func init() {
},
},
})
fs.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
}
// Fs represents a remote swift server
@ -191,24 +198,24 @@ func parsePath(path string) (container, directory string, err error) {
func swiftConnection(name string) (*swift.Connection, error) {
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: fs.ConfigFileGet(name, "user"),
ApiKey: fs.ConfigFileGet(name, "key"),
AuthUrl: fs.ConfigFileGet(name, "auth"),
UserId: fs.ConfigFileGet(name, "user_id"),
Domain: fs.ConfigFileGet(name, "domain"),
Tenant: fs.ConfigFileGet(name, "tenant"),
TenantId: fs.ConfigFileGet(name, "tenant_id"),
TenantDomain: fs.ConfigFileGet(name, "tenant_domain"),
Region: fs.ConfigFileGet(name, "region"),
StorageUrl: fs.ConfigFileGet(name, "storage_url"),
AuthToken: fs.ConfigFileGet(name, "auth_token"),
AuthVersion: fs.ConfigFileGetInt(name, "auth_version", 0),
EndpointType: swift.EndpointType(fs.ConfigFileGet(name, "endpoint_type", "public")),
UserName: config.FileGet(name, "user"),
ApiKey: config.FileGet(name, "key"),
AuthUrl: config.FileGet(name, "auth"),
UserId: config.FileGet(name, "user_id"),
Domain: config.FileGet(name, "domain"),
Tenant: config.FileGet(name, "tenant"),
TenantId: config.FileGet(name, "tenant_id"),
TenantDomain: config.FileGet(name, "tenant_domain"),
Region: config.FileGet(name, "region"),
StorageUrl: config.FileGet(name, "storage_url"),
AuthToken: config.FileGet(name, "auth_token"),
AuthVersion: config.FileGetInt(name, "auth_version", 0),
EndpointType: swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
Transport: fshttp.NewTransport(fs.Config),
}
if fs.ConfigFileGetBool(name, "env_auth", false) {
if config.FileGetBool(name, "env_auth", false) {
err := c.ApplyEnvironment()
if err != nil {
return nil, errors.Wrap(err, "failed to read environment variables")
@ -466,7 +473,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.container == "" {
return errors.New("container needed for recursive list")
}
list := fs.NewListRHelper(callback)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(entry fs.DirEntry) error {
return list.Add(entry)
})
@ -549,7 +556,7 @@ func (f *Fs) Purge() error {
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- fs.DeleteFiles(toBeDeleted)
delErr <- operations.DeleteFiles(toBeDeleted)
}()
err := f.list("", true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
@ -596,8 +603,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// ------------------------------------------------------------
@ -621,9 +628,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
@ -855,7 +862,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.container == "" {
return fs.FatalError(errors.New("container name needed in remote"))
return fserrors.FatalError(errors.New("container name needed in remote"))
}
err := o.fs.Mkdir("")
if err != nil {
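Purge above also picks up the new fs/operations package: DeleteFiles consumes a channel of objects and deletes them with fs.Config.Transfers-way concurrency. The pattern as a standalone sketch:

package example

import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/operations"
)

// purge streams objects into DeleteFiles the way swift's Purge does.
func purge(objs []fs.Object) error {
    toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
    delErr := make(chan error, 1)
    go func() {
        delErr <- operations.DeleteFiles(toBeDeleted) // was fs.DeleteFiles
    }()
    for _, o := range objs {
        toBeDeleted <- o
    }
    close(toBeDeleted)
    return <-delErr
}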

View file

@ -30,11 +30,12 @@ import (
"github.com/ncw/rclone/backend/webdav/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/rclone/pacer"
"github.com/ncw/rclone/rest"
"github.com/ncw/rclone/webdav/api"
"github.com/pkg/errors"
)
@ -159,7 +160,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// itemIsDir returns true if the item is a directory
@ -250,21 +251,21 @@ func (o *Object) filePath() string {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
endpoint := fs.ConfigFileGet(name, "url")
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
user := fs.ConfigFileGet(name, "user")
pass := fs.ConfigFileGet(name, "pass")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
if pass != "" {
var err error
pass, err = fs.Reveal(pass)
pass, err = config.Reveal(pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
}
vendor := fs.ConfigFileGet(name, "vendor")
vendor := config.FileGet(name, "vendor")
// Parse the endpoint
u, err := url.Parse(endpoint)
@ -277,7 +278,7 @@ func NewFs(name, root string) (fs.Fs, error) {
root: root,
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fs.Config.Client()).SetRoot(u.String()).SetUserPass(user, pass),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()).SetUserPass(user, pass),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
user: user,
pass: pass,
@ -765,8 +766,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashNone)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashNone)
}
// ------------------------------------------------------------
@ -790,9 +791,9 @@ func (o *Object) Remote() string {
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashSHA1 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashSHA1 {
return "", hash.ErrHashUnsupported
}
return o.sha1, nil
}
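lib/rest keeps its chaining constructor; only the source of the underlying HTTP client changes. The srv line above, as a sketch:

package example

import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/lib/rest"
)

// newSrv builds the REST client the way webdav's NewFs does above.
func newSrv(endpoint, user, pass string) *rest.Client {
    client := fshttp.NewClient(fs.Config) // was fs.Config.Client()
    return rest.NewClient(client).SetRoot(endpoint).SetUserPass(user, pass)
}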

View file

@ -15,9 +15,11 @@ import (
yandex "github.com/ncw/rclone/backend/yandex/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/oauthutil"
yandex "github.com/ncw/rclone/yandex/api"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
@ -37,7 +39,7 @@ var (
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
@ -55,10 +57,10 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Yandex Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Yandex Client Secret - leave blank normally.",
}},
})
@ -109,7 +111,7 @@ func (f *Fs) Features() *fs.Features {
// read access token from ConfigFile string
func getAccessToken(name string) (*oauth2.Token, error) {
// Read the token from the config file
tokenConfig := fs.ConfigFileGet(name, "token")
tokenConfig := config.FileGet(name, "token")
//Get access token from config string
decoder := json.NewDecoder(strings.NewReader(tokenConfig))
var result *oauth2.Token
@ -129,7 +131,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}
//create new client
yandexDisk := yandex.NewClient(token.AccessToken, fs.Config.Client())
yandexDisk := yandex.NewClient(token.AccessToken, fshttp.NewClient(fs.Config))
f := &Fs{
name: name,
@ -487,8 +489,8 @@ func (f *Fs) CleanUp() error {
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.HashMD5)
}
// ------------------------------------------------------------
@ -512,9 +514,9 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
if t != fs.HashMD5 {
return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.HashMD5 {
return "", hash.ErrHashUnsupported
}
return o.md5sum, nil
}
@ -578,7 +580,7 @@ func (o *Object) remotePath() string {
//
// The new object may have been created if an error is returned
func (o *Object) Update(in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
in := fs.NewCountingReader(in0)
in := readers.NewCountingReader(in0)
modTime := src.ModTime()
remote := o.remotePath()
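Finally, the byte-counting reader joins the other reader helpers in lib/readers (RepeatableReader, used by onedrive above, lives there too). A sketch; the BytesRead accessor is assumed from the reader's use elsewhere in rclone, since this hunk only shows the constructor:

package example

import (
    "io"

    "github.com/ncw/rclone/lib/readers"
)

// countReads wraps a reader so the bytes flowing through it are tallied.
func countReads(r io.Reader) (io.Reader, func() uint64) {
    in := readers.NewCountingReader(r) // was fs.NewCountingReader
    return in, in.BytesRead // assumed accessor for the running total
}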