// +build !plan9,go1.7
package cache
import (
"io"
"os"
"path"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Values stored in Object.CacheType describing where the object lives.
const (
	objectInCache       = "Object"     // served from the wrapped remote
	objectPendingUpload = "TempObject" // staged in the temp FS, queued for upload
)
// Object is a generic file like object that stores basic information about it
type Object struct {
fs.Object `json:"-"`
2018-01-29 22:05:04 +00:00
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
2018-01-29 22:05:04 +00:00
CacheHashes map[hash.Type]string // all supported hashes cached
2017-11-12 17:54:25 +00:00
refreshMutex sync.Mutex
}
// NewObject builds one from a generic fs.Object
2018-01-29 22:05:04 +00:00
func NewObject(f *Fs, remote string) *Object {
2017-11-12 17:54:25 +00:00
fullRemote := path.Join(f.Root(), remote)
dir, name := path.Split(fullRemote)
2018-01-29 22:05:04 +00:00
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
2017-11-12 17:54:25 +00:00
co := &Object{
2018-01-29 22:05:04 +00:00
ParentFs: parentFs,
2017-11-12 17:54:25 +00:00
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheStorable: false,
2018-01-29 22:05:04 +00:00
CacheType: cacheType,
CacheTs: time.Now(),
2017-11-12 17:54:25 +00:00
}
return co
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
2018-01-29 22:05:04 +00:00
cacheType := objectInCache
parentFs := f.UnWrap()
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
2017-11-12 17:54:25 +00:00
co = &Object{
2018-01-29 22:05:04 +00:00
ParentFs: parentFs,
2017-11-12 17:54:25 +00:00
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
2018-01-29 22:05:04 +00:00
CacheType: cacheType,
CacheTs: time.Now(),
2017-11-12 17:54:25 +00:00
}
co.updateData(o)
return co
}
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
2018-01-29 22:05:04 +00:00
o.CacheHashes = make(map[hash.Type]string)
2017-11-12 17:54:25 +00:00
}
// Fs returns its FS info
//
// Part of the fs.Object interface; the cache FS (not the wrapped
// remote) is reported as the owner.
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}
// String returns a human friendly name for this object
//
// Safe on a nil receiver so that logging a missing object prints "<nil>".
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
p := path.Join(o.Dir, o.Name)
2018-01-29 22:05:04 +00:00
return o.CacheFs.cleanRootFromPath(p)
2017-11-12 17:54:25 +00:00
}
// abs returns the absolute path to the object
// (Dir joined with Name; unlike Remote, the FS root is kept).
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}
// ModTime returns the cached ModTime
// (stored as UnixNano, so no call to the wrapped remote is needed).
func (o *Object) ModTime() time.Time {
	return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size without contacting the wrapped remote.
func (o *Object) Size() int64 {
	return o.CacheSize
}
// Storable returns the cached Storable flag.
func (o *Object) Storable() bool {
	return o.CacheStorable
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
2018-01-29 22:05:04 +00:00
func (o *Object) refreshFromSource(force bool) error {
2017-11-12 17:54:25 +00:00
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
2018-01-29 22:05:04 +00:00
var err error
var liveObject fs.Object
2017-11-12 17:54:25 +00:00
2018-01-29 22:05:04 +00:00
if o.Object != nil && !force {
2017-11-12 17:54:25 +00:00
return nil
}
2018-01-29 22:05:04 +00:00
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
2017-11-12 17:54:25 +00:00
if err != nil {
2018-01-29 22:05:04 +00:00
fs.Errorf(o, "error refreshing object in : %v", err)
2017-11-12 17:54:25 +00:00
return err
}
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
2018-01-29 22:05:04 +00:00
if err := o.refreshFromSource(false); err != nil {
2017-11-12 17:54:25 +00:00
return err
}
err := o.Object.SetModTime(t)
if err != nil {
return err
}
o.CacheModTime = t.UnixNano()
o.persist()
2018-01-29 22:05:04 +00:00
fs.Debugf(o, "updated ModTime: %v", t)
2017-11-12 17:54:25 +00:00
return nil
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
2018-01-29 22:05:04 +00:00
if err := o.refreshFromSource(true); err != nil {
2017-11-12 17:54:25 +00:00
return nil, err
}
var err error
2018-01-29 22:05:04 +00:00
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
2017-11-12 17:54:25 +00:00
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
2018-01-22 19:44:55 +00:00
offset, limit = x.Decode(o.Size())
}
2018-01-22 19:44:55 +00:00
_, err = cacheReader.Seek(offset, os.SEEK_SET)
if err != nil {
2018-01-22 19:44:55 +00:00
return nil, err
2017-11-12 17:54:25 +00:00
}
}
2018-01-22 19:44:55 +00:00
return readers.NewLimitedReadCloser(cacheReader, limit), nil
2017-11-12 17:54:25 +00:00
}
// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
2018-01-29 22:05:04 +00:00
if err := o.refreshFromSource(false); err != nil {
2017-11-12 17:54:25 +00:00
return err
}
2018-01-29 22:05:04 +00:00
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
2017-11-12 17:54:25 +00:00
2018-01-29 22:05:04 +00:00
// FIXME use reliable upload
2017-11-12 17:54:25 +00:00
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
}
2018-01-29 22:05:04 +00:00
// deleting cached chunks and info to be replaced with new ones
_ = o.CacheFs.cache.RemoveObject(o.abs())
2017-11-12 17:54:25 +00:00
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
2018-01-29 22:05:04 +00:00
o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now()
2017-11-12 17:54:25 +00:00
o.persist()
return nil
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
2018-01-29 22:05:04 +00:00
if err := o.refreshFromSource(false); err != nil {
2017-11-12 17:54:25 +00:00
return err
}
2018-01-29 22:05:04 +00:00
// pause background uploads if active
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
2017-11-12 17:54:25 +00:00
err := o.Object.Remove()
if err != nil {
return err
}
2018-01-29 22:05:04 +00:00
fs.Debugf(o, "removing object")
2017-11-12 17:54:25 +00:00
_ = o.CacheFs.cache.RemoveObject(o.abs())
2018-01-29 22:05:04 +00:00
_ = o.CacheFs.cache.removePendingUpload(o.abs())
2018-02-10 20:01:05 +00:00
parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
_ = o.CacheFs.cache.ExpireDir(parentCd)
// advertise to DirChangeNotify if wrapped doesn't do that
o.CacheFs.notifyDirChangeUpstreamIfNeeded(parentCd.Remote())
2018-01-29 22:05:04 +00:00
return nil
2017-11-12 17:54:25 +00:00
}
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ht hash.Type) (string, error) {
2018-01-29 22:05:04 +00:00
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
2017-11-12 17:54:25 +00:00
}
2018-01-29 22:05:04 +00:00
cachedHash, found := o.CacheHashes[ht]
2017-11-12 17:54:25 +00:00
if found {
return cachedHash, nil
}
2018-01-29 22:05:04 +00:00
if err := o.refreshFromSource(false); err != nil {
2017-11-12 17:54:25 +00:00
return "", err
}
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}
2018-01-29 22:05:04 +00:00
o.CacheHashes[ht] = liveHash
2017-11-12 17:54:25 +00:00
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)
return liveHash, nil
}
// persist adds this object to the persistent cache
//
// Failures are logged but not returned — persisting is best-effort.
// Returns the receiver for chaining.
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}
2018-01-29 22:05:04 +00:00
func (o *Object) isTempFile() bool {
_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
if err != nil {
o.CacheType = objectInCache
return false
}
o.CacheType = objectPendingUpload
return true
}
// tempFileStartedUpload reports whether the background upload of this
// object has already started; false when it is only queued or when the
// pending-upload lookup fails.
func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		return false
	}
	return started
}
2017-11-12 17:54:25 +00:00
var (
_ fs.Object = (*Object)(nil)
)