Break the fs package up into smaller parts.
The purpose of this is to make it easier to maintain and eventually to allow the rclone backends to be re-used in other projects without having to use the rclone configuration system. The new code layout is documented in CONTRIBUTING.
parent 92624bbbf1
commit 11da2a6c9b

183 changed files with 5749 additions and 5063 deletions
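Most of the diff below is a mechanical rename: helpers that previously hung off the fs package move into focused subpackages (fs/config, fs/config/flags, fs/walk, fs/hash, fs/object). A minimal sketch of the mapping, using only calls that appear in the hunks below ("mycache" is a hypothetical remote name):

```go
package main

import (
	"path/filepath"

	"github.com/ncw/rclone/fs/config"       // was: fs.ConfigFileGet / fs.ConfigFileSet / fs.CacheDir
	"github.com/ncw/rclone/fs/config/flags" // was: fs.StringP / fs.BoolP / fs.IntP
)

// Flag registration moves from fs to fs/config/flags; the CacheDir
// default moves from fs to fs/config.
var cacheDbPath = flags.StringP("cache-db-path", "",
	filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")

func main() {
	// Config-file reads move from fs.ConfigFileGet to config.FileGet.
	remote := config.FileGet("mycache", "remote")
	_ = remote
}
```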
backend/cache/cache.go (vendored, 56 lines changed)

@@ -18,6 +18,10 @@ import (
 	"github.com/ncw/rclone/backend/crypt"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/time/rate"
@@ -47,18 +51,18 @@ const (
 // Globals
 var (
 	// Flags
-	cacheDbPath             = fs.StringP("cache-db-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cache DB")
-	cacheChunkPath          = fs.StringP("cache-chunk-path", "", filepath.Join(fs.CacheDir, "cache-backend"), "Directory to cached chunk files")
-	cacheDbPurge            = fs.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
-	cacheChunkSize          = fs.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
-	cacheTotalChunkSize     = fs.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
-	cacheChunkCleanInterval = fs.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
-	cacheInfoAge            = fs.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
-	cacheReadRetries        = fs.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
-	cacheTotalWorkers       = fs.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
-	cacheChunkNoMemory      = fs.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
-	cacheRps                = fs.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
-	cacheStoreWrites        = fs.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
+	cacheDbPath             = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
+	cacheChunkPath          = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
+	cacheDbPurge            = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
+	cacheChunkSize          = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
+	cacheTotalChunkSize     = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
+	cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
+	cacheInfoAge            = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
+	cacheReadRetries        = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
+	cacheTotalWorkers       = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
+	cacheChunkNoMemory      = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
+	cacheRps                = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
+	cacheStoreWrites        = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
 )

 // Register with Fs
@@ -223,7 +227,7 @@ type Fs struct {

 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, rpath string) (fs.Fs, error) {
-	remote := fs.ConfigFileGet(name, "remote")
+	remote := config.FileGet(name, "remote")
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
 	}
@@ -235,10 +239,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	}
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)

-	plexURL := fs.ConfigFileGet(name, "plex_url")
-	plexToken := fs.ConfigFileGet(name, "plex_token")
+	plexURL := config.FileGet(name, "plex_url")
+	plexToken := config.FileGet(name, "plex_token")
 	var chunkSize fs.SizeSuffix
-	chunkSizeString := fs.ConfigFileGet(name, "chunk_size", DefCacheChunkSize)
+	chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
 	if *cacheChunkSize != DefCacheChunkSize {
 		chunkSizeString = *cacheChunkSize
 	}
@@ -247,7 +251,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to understand chunk size", chunkSizeString)
 	}
 	var chunkTotalSize fs.SizeSuffix
-	chunkTotalSizeString := fs.ConfigFileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
+	chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
 	if *cacheTotalChunkSize != DefCacheTotalChunkSize {
 		chunkTotalSizeString = *cacheTotalChunkSize
 	}
@@ -260,7 +264,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
 	}
-	infoAge := fs.ConfigFileGet(name, "info_age", DefCacheInfoAge)
+	infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
 	if *cacheInfoAge != DefCacheInfoAge {
 		infoAge = *cacheInfoAge
 	}
@@ -301,10 +305,10 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 			return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
 		}
 	} else {
-		plexUsername := fs.ConfigFileGet(name, "plex_username")
-		plexPassword := fs.ConfigFileGet(name, "plex_password")
+		plexUsername := config.FileGet(name, "plex_username")
+		plexPassword := config.FileGet(name, "plex_password")
 		if plexPassword != "" && plexUsername != "" {
-			decPass, err := fs.Reveal(plexPassword)
+			decPass, err := config.Reveal(plexPassword)
 			if err != nil {
 				decPass = plexPassword
 			}
@@ -319,8 +323,8 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	dbPath := *cacheDbPath
 	chunkPath := *cacheChunkPath
 	// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-	if dbPath != filepath.Join(fs.CacheDir, "cache-backend") &&
-		chunkPath == filepath.Join(fs.CacheDir, "cache-backend") {
+	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
+		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
 		chunkPath = dbPath
 	}
 	if filepath.Ext(dbPath) != "" {
@@ -506,7 +510,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	return cachedEntries, nil
 }

-func (f *Fs) recurse(dir string, list *fs.ListRHelper) error {
+func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(dir)
 	if err != nil {
 		return err
@@ -558,7 +562,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	}

 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := fs.NewListRHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(dir, list)
 	if err != nil {
 		return err
@@ -895,7 +899,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }

 // Hashes returns the supported hash sets.
-func (f *Fs) Hashes() fs.HashSet {
+func (f *Fs) Hashes() hash.Set {
 	return f.Fs.Hashes()
 }
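The listing helper used by recurse and ListR above moves from fs to fs/walk with the same shape. A simplified, non-recursive sketch of the pattern (listAll is a hypothetical wrapper; Add and Flush are assumed to keep their pre-move behaviour of batching entries and emitting the remainder):

```go
// listAll lists one directory through a walk.ListRHelper.
func listAll(f fs.Fs, dir string, callback fs.ListRCallback) error {
	list := walk.NewListRHelper(callback) // was fs.NewListRHelper
	entries, err := f.List(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if err := list.Add(entry); err != nil { // buffers and emits in batches
			return err
		}
	}
	return list.Flush() // emit any remaining buffered entries
}
```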
backend/cache/cache_internal_test.go (vendored, 60 lines changed)

@@ -20,6 +20,8 @@ import (
 	//"strings"

 	"github.com/ncw/rclone/backend/cache"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/object"
 	//"github.com/ncw/rclone/cmd/mount"
 	//_ "github.com/ncw/rclone/cmd/cmount"
 	//"github.com/ncw/rclone/cmd/mountlib"
@@ -492,7 +494,7 @@ func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object
 func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
 	in := bytes.NewReader(data)
 	modTime := time.Now()
-	objInfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
+	objInfo := object.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)

 	obj, err := f.Put(in, objInfo)
 	require.NoError(t, err)
@@ -503,8 +505,8 @@ func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Obje
 func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
 	in1 := bytes.NewReader(data1)
 	in2 := bytes.NewReader(data2)
-	objInfo1 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-	objInfo2 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

 	obj, err := f.Put(in1, objInfo1)
 	require.NoError(t, err)
@@ -540,15 +542,15 @@ func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {

 func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
-	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
-	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
+	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
+	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
 	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)

 	localExists := false
 	cacheExists := false
 	cryptExists := false
-	for _, s := range fs.ConfigFileSections() {
+	for _, s := range config.FileSections() {
 		if s == localRemote {
 			localExists = true
 		}
@@ -563,28 +565,28 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st
 	localRemoteWrap := ""
 	if !localExists {
 		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		fs.ConfigFileSet(localRemote, "type", "local")
-		fs.ConfigFileSet(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 	}

 	if !cacheExists {
-		fs.ConfigFileSet(cacheRemote, "type", "cache")
-		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
+		config.FileSet(cacheRemote, "type", "cache")
+		config.FileSet(cacheRemote, "remote", localRemoteWrap)
 	}
 	if c, ok := cfg["chunk_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
+		config.FileSet(cacheRemote, "chunk_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
+		config.FileSet(cacheRemote, "chunk_size", "1m")
 	}
 	if c, ok := cfg["chunk_total_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
+		config.FileSet(cacheRemote, "chunk_total_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
+		config.FileSet(cacheRemote, "chunk_total_size", "2m")
 	}
 	if c, ok := cfg["info_age"]; ok {
-		fs.ConfigFileSet(cacheRemote, "info_age", c)
+		config.FileSet(cacheRemote, "info_age", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
+		config.FileSet(cacheRemote, "info_age", infoAge.String())
 	}

 	if !cryptExists {
@@ -627,14 +629,14 @@ func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote st

 func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
-	dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
-	chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
+	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
+	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
 	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)

 	localExists := false
 	cacheExists := false
-	for _, s := range fs.ConfigFileSections() {
+	for _, s := range config.FileSections() {
 		if s == localRemote {
 			localExists = true
 		}
@@ -646,28 +648,28 @@ func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[stri
 	localRemoteWrap := ""
 	if !localExists {
 		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		fs.ConfigFileSet(localRemote, "type", "local")
-		fs.ConfigFileSet(localRemote, "nounc", "true")
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
 	}

 	if !cacheExists {
-		fs.ConfigFileSet(cacheRemote, "type", "cache")
-		fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
+		config.FileSet(cacheRemote, "type", "cache")
+		config.FileSet(cacheRemote, "remote", localRemoteWrap)
 	}
 	if c, ok := cfg["chunk_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", c)
+		config.FileSet(cacheRemote, "chunk_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
+		config.FileSet(cacheRemote, "chunk_size", "1m")
 	}
 	if c, ok := cfg["chunk_total_size"]; ok {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
+		config.FileSet(cacheRemote, "chunk_total_size", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
+		config.FileSet(cacheRemote, "chunk_total_size", "2m")
 	}
 	if c, ok := cfg["info_age"]; ok {
-		fs.ConfigFileSet(cacheRemote, "info_age", c)
+		config.FileSet(cacheRemote, "info_age", c)
 	} else {
-		fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
+		config.FileSet(cacheRemote, "info_age", infoAge.String())
 	}

 	if c, ok := cfg["cache-chunk-no-memory"]; ok {
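The test helpers above now build upload metadata through fs/object instead of fs. A condensed sketch of the pattern (putBytes is a hypothetical name; the NewStaticObjectInfo signature is exactly as used in this diff):

```go
// putBytes uploads raw bytes to a remote using a static ObjectInfo.
func putBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
	in := bytes.NewReader(data)
	// arguments: remote path, mod time, size, storable, hashes (nil = unknown), source Fs
	objInfo := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data)), true, nil, f)
	obj, err := f.Put(in, objInfo)
	require.NoError(t, err)
	return obj
}
```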
backend/cache/object.go (vendored, 31 lines changed)

@@ -13,21 +13,22 @@ import (
 	"strconv"

 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
 )

 // Object is a generic file like object that stores basic information about it
 type Object struct {
 	fs.Object `json:"-"`

-	CacheFs       *Fs       `json:"-"`        // cache fs
-	Name          string    `json:"name"`     // name of the directory
-	Dir           string    `json:"dir"`      // abs path of the object
-	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
-	CacheSize     int64     `json:"size"`     // size of directory and contents or -1 if unknown
-	CacheStorable bool      `json:"storable"` // says whether this object can be stored
-	CacheType     string    `json:"cacheType"`
-	CacheTs       time.Time `json:"cacheTs"`
-	cacheHashes   map[fs.HashType]string // all supported hashes cached
+	CacheFs       *Fs       `json:"-"`        // cache fs
+	Name          string    `json:"name"`     // name of the directory
+	Dir           string    `json:"dir"`      // abs path of the object
+	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
+	CacheSize     int64     `json:"size"`     // size of directory and contents or -1 if unknown
+	CacheStorable bool      `json:"storable"` // says whether this object can be stored
+	CacheType     string    `json:"cacheType"`
+	CacheTs       time.Time `json:"cacheTs"`
+	cacheHashes   map[hash.Type]string // all supported hashes cached

 	refreshMutex sync.Mutex
 }
@@ -80,10 +81,10 @@ func (o *Object) UnmarshalJSON(b []byte) error {
 		return err
 	}

-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 	for k, v := range aux.Hashes {
 		ht, _ := strconv.Atoi(k)
-		o.cacheHashes[fs.HashType(ht)] = v
+		o.cacheHashes[hash.Type(ht)] = v
 	}

 	return nil
@@ -112,7 +113,7 @@ func (o *Object) updateData(source fs.Object) {
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 }

 // Fs returns its FS info
@@ -251,7 +252,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

 	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
-	o.cacheHashes = make(map[fs.HashType]string)
+	o.cacheHashes = make(map[hash.Type]string)
 	o.persist()

 	return nil
@@ -274,9 +275,9 @@ func (o *Object) Remove() error {

 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ht fs.HashType) (string, error) {
+func (o *Object) Hash(ht hash.Type) (string, error) {
 	if o.cacheHashes == nil {
-		o.cacheHashes = make(map[fs.HashType]string)
+		o.cacheHashes = make(map[hash.Type]string)
 	}

 	cachedHash, found := o.cacheHashes[ht]
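object.go keeps its lazy hash cache; only the key type changes from fs.HashType to hash.Type. A sketch of the memoisation pattern Object.Hash implements (cachedHash is a hypothetical free function; the real method also persists the result to the cache DB):

```go
// cachedHash returns a memoised hash, computing and caching it on a miss.
func cachedHash(o fs.Object, cache map[hash.Type]string, ht hash.Type) (string, error) {
	if v, ok := cache[ht]; ok {
		return v, nil // cache hit: no work on the wrapped object
	}
	v, err := o.Hash(ht) // delegate to the wrapped object on a miss
	if err != nil {
		return "", err
	}
	cache[ht] = v
	return v, nil
}
```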
backend/cache/plex.go (vendored, 5 lines changed)

@@ -13,6 +13,7 @@ import (
 	"sync"

 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 )

 const (
@@ -107,8 +108,8 @@ func (p *plexConnector) authenticate() error {
 	}
 	p.token = token
 	if p.token != "" {
-		fs.ConfigFileSet(p.f.Name(), "plex_token", p.token)
-		fs.SaveConfig()
+		config.FileSet(p.f.Name(), "plex_token", p.token)
+		config.SaveConfig()
 		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
 	}
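plex.go persists the fetched token through the new config package; the two calls mirror the old fs.ConfigFileSet / fs.SaveConfig pair. A minimal sketch (saveToken is a hypothetical helper):

```go
// saveToken stores a token in the named remote's config section and
// writes the config file to disk.
func saveToken(remoteName, token string) {
	config.FileSet(remoteName, "plex_token", token) // was fs.ConfigFileSet
	config.SaveConfig()                             // was fs.SaveConfig
}
```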