Add context propagation to rclone

- Change rclone/fs interfaces to accept context.Context
- Update interface implementations to use context.Context
- Change top level usage to propagate context to lower level functions

Context propagation is needed for stopping transfers and passing other
request-scoped values.
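
For illustration only, a minimal sketch of the pattern this change introduces: the context arrives as the first parameter, is threaded down to the blocking call, and cancelling it stops the operation. The Lister interface and slowLister type below are made up for this note, not rclone's real fs interfaces, and the cancellation only works where the underlying call honours the context.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Lister mirrors the shape of the changed methods: context.Context is now the
// first parameter of every call that may block on the network.
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

// slowLister stands in for a backend whose underlying request honours ctx.
type slowLister struct{}

func (slowLister) List(ctx context.Context, dir string) ([]string, error) {
	select {
	case <-time.After(2 * time.Second): // pretend network round trip
		return []string{dir + "/a", dir + "/b"}, nil
	case <-ctx.Done(): // transfer stopped or deadline exceeded
		return nil, ctx.Err()
	}
}

func main() {
	// A caller can now stop the operation by cancelling the context it passes in.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	var l Lister = slowLister{}
	if _, err := l.List(ctx, "remote:dir"); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("listing stopped by context:", err)
	}
}
```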
Aleksandar Jankovic, 2019-06-17 10:34:30 +02:00 (committed by Nick Craig-Wood)
parent a2c317b46e
commit f78cd1e043
156 changed files with 2570 additions and 2380 deletions

@@ -309,6 +309,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -415,7 +416,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
f.root += "/"
}
_, err := f.NewObject(remote)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
@@ -454,7 +455,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
@@ -496,7 +497,7 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
f.containerOKMu.Lock()
deleted := f.containerDeleted
f.containerOKMu.Unlock()
@@ -523,7 +524,6 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
Prefix: root,
MaxResults: int32(maxResults),
}
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
@@ -621,8 +621,8 @@ func (f *Fs) markContainerOK() {
}
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -665,11 +665,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.container == "" {
return f.listContainers(dir)
}
return f.listDir(dir)
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -688,12 +688,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.container == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -745,13 +745,13 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(in, src, options...)
return fs, fs.Update(ctx, in, src, options...)
}
// Check if the container exists
@@ -784,7 +784,7 @@ func (f *Fs) dirExists() (bool, error) {
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.containerOK {
@@ -831,9 +831,9 @@ func (f *Fs) Mkdir(dir string) error {
}
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(dir string) (err error) {
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
empty := true
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -880,8 +880,8 @@ func (f *Fs) deleteContainer() error {
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
err := f.isEmpty(dir)
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.isEmpty(ctx, dir)
if err != nil {
return err
}
@@ -902,7 +902,7 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge() error {
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
if f.root != "" || dir != "" {
// Delegate to caller if not root container
@@ -920,8 +920,8 @@ func (f *Fs) Purge() error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -939,7 +939,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
options := azblob.BlobAccessConditions{}
ctx := context.Background()
var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
@@ -960,7 +959,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
copyStatus = getMetadata.CopyStatus()
}
return f.NewObject(remote)
return f.NewObject(ctx, remote)
}
// ------------------------------------------------------------
@@ -984,7 +983,7 @@ func (o *Object) Remote() string {
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -1124,14 +1123,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() (result time.Time) {
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData()
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
@@ -1140,7 +1139,6 @@ func (o *Object) SetModTime(modTime time.Time) error {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err)
@@ -1158,7 +1156,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset int64
var count int64
@@ -1182,7 +1180,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}
blob := o.getBlobReference()
ctx := context.Background()
ac := azblob.BlobAccessConditions{}
var dowloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1371,26 +1368,26 @@ outer:
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
err = o.fs.Mkdir("")
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
size := src.Size()
// Update Mod time
o.updateMetadataWithModTime(src.ModTime())
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
}
blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(o)
httpHeaders.ContentType = fs.MimeType(ctx, o)
// Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1415,7 +1412,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
}
ctx := context.Background()
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
@@ -1448,11 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Remove an object
func (o *Object) Remove() error {
func (o *Object) Remove(ctx context.Context) error {
blob := o.getBlobReference()
snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err)
@@ -1460,7 +1455,7 @@ func (o *Object) Remove() error {
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
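
A recurring detail in the hunks above is that each method used to create its own ctx := context.Background() and now simply forwards the context it receives, so cancellation from the caller can reach the SDK calls (azblob's calls already accepted a context). Below is a hedged before/after sketch of that wiring with made-up types (object, fakeDelete), not the real azureblob backend.

```go
package main

import (
	"context"
	"fmt"
)

// fakeDelete stands in for an SDK call that already takes a context, such as
// a blob delete; previously the backend handed it a fresh context.Background().
func fakeDelete(ctx context.Context, name string) error {
	if err := ctx.Err(); err != nil {
		return err // the caller's cancellation is visible down here now
	}
	fmt.Println("deleted", name)
	return nil
}

type object struct{ remote string }

// Before: func (o *object) Remove() error { ctx := context.Background(); ... }
// After: the context arrives as a parameter and is passed straight through.
func (o *object) Remove(ctx context.Context) error {
	return fakeDelete(ctx, o.remote)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	o := &object{remote: "container/file.txt"}

	_ = o.Remove(ctx) // context still live: the delete goes through

	cancel()
	if err := o.Remove(ctx); err != nil {
		fmt.Println("second remove skipped:", err) // context.Canceled propagates up
	}
}
```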