Convert to using github.com/pkg/errors everywhere
commit 4c5b2833b3 (parent 7fe653c350)
32 changed files with 187 additions and 161 deletions
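The whole change follows one mechanical pattern: error messages move to lower case, fmt.Errorf calls that carry an underlying error become errors.Wrap/errors.Wrapf, formatted messages without a cause become errors.Errorf, and fixed messages (including the sentinel errors in fs/fs.go) become errors.New. The sketch below is a minimal, self-contained illustration of that before/after shape using github.com/pkg/errors; loadConfig and its message are made-up examples, not code from this commit.

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// loadConfig is a hypothetical helper (not part of rclone) used only to
// illustrate the conversion applied throughout this commit.
func loadConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Before: return fmt.Errorf("Failed to open config %q: %v", path, err)
		// After: wrap the cause with a lower-case message.
		return errors.Wrapf(err, "failed to open config %q", path)
	}
	_ = f.Close()
	return nil
}

func main() {
	if err := loadConfig("/no/such/file"); err != nil {
		fmt.Println(err)               // wrapped message plus the original error text
		fmt.Println(errors.Cause(err)) // the underlying *os.PathError is still accessible
	}
}

The practical gain is that the original error survives the wrap and can be recovered with errors.Cause, instead of being flattened into a string by %v.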
|
@@ -26,6 +26,7 @@ import (
 "github.com/ncw/rclone/fs"
 "github.com/ncw/rclone/oauthutil"
 "github.com/ncw/rclone/pacer"
+"github.com/pkg/errors"
 "github.com/spf13/pflag"
 "golang.org/x/oauth2"
 )
@@ -185,7 +186,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 return f.shouldRetry(resp, err)
 })
 if err != nil {
-return nil, fmt.Errorf("Failed to get endpoints: %v", err)
+return nil, errors.Wrap(err, "failed to get endpoints")
 }

 // Get rootID
@@ -195,7 +196,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 return f.shouldRetry(resp, err)
 })
 if err != nil || rootInfo.Id == nil {
-return nil, fmt.Errorf("Failed to get root: %v", err)
+return nil, errors.Wrap(err, "failed to get root")
 }

 f.dirCache = dircache.New(root, *rootInfo.Id, f)
@@ -458,7 +459,7 @@ func (f *Fs) Mkdir() error {
 // refuses to do so if it has anything in
 func (f *Fs) purgeCheck(check bool) error {
 if f.root == "" {
-return fmt.Errorf("Can't purge root directory")
+return errors.New("can't purge root directory")
 }
 dc := f.dirCache
 err := dc.FindRoot(false)
@@ -487,7 +488,7 @@ func (f *Fs) purgeCheck(check bool) error {
 return err
 }
 if !empty {
-return fmt.Errorf("Directory not empty")
+return errors.New("directory not empty")
 }
 }

b2/b2.go (30 changes)
@@ -10,7 +10,6 @@ package b2
 import (
 "bytes"
 "crypto/sha1"
-"errors"
 "fmt"
 "hash"
 "io"
@@ -28,6 +27,7 @@ import (
 "github.com/ncw/rclone/fs"
 "github.com/ncw/rclone/pacer"
 "github.com/ncw/rclone/rest"
+"github.com/pkg/errors"
 )

 const (
@@ -119,7 +119,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
 func parsePath(path string) (bucket, directory string, err error) {
 parts := matcher.FindStringSubmatch(path)
 if parts == nil {
-err = fmt.Errorf("Couldn't find bucket in b2 path %q", path)
+err = errors.Errorf("couldn't find bucket in b2 path %q", path)
 } else {
 bucket, directory = parts[1], parts[2]
 directory = strings.Trim(directory, "/")
@@ -207,7 +207,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 }
 err = f.authorizeAccount()
 if err != nil {
-return nil, fmt.Errorf("Failed to authorize account: %v", err)
+return nil, errors.Wrap(err, "failed to authorize account")
 }
 if f.root != "" {
 f.root += "/"
@@ -247,7 +247,7 @@ func (f *Fs) authorizeAccount() error {
 return f.shouldRetryNoReauth(resp, err)
 })
 if err != nil {
-return fmt.Errorf("Failed to authenticate: %v", err)
+return errors.Wrap(err, "failed to authenticate")
 }
 f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
 return nil
@@ -276,7 +276,7 @@ func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
 return f.shouldRetryNoReauth(resp, err)
 })
 if err != nil {
-return nil, fmt.Errorf("Failed to get upload URL: %v", err)
+return nil, errors.Wrap(err, "failed to get upload URL")
 }
 } else {
 upload, f.uploads = f.uploads[0], f.uploads[1:]
@@ -562,7 +562,7 @@ func (f *Fs) getBucketID() (bucketID string, err error) {

 })
 if bucketID == "" {
-err = fs.ErrorDirNotFound //fmt.Errorf("Couldn't find bucket %q", f.bucket)
+err = fs.ErrorDirNotFound
 }
 f._bucketID = bucketID
 return bucketID, err
@@ -618,7 +618,7 @@ func (f *Fs) Mkdir() error {
 return nil
 }
 }
-return fmt.Errorf("Failed to create bucket: %v", err)
+return errors.Wrap(err, "failed to create bucket")
 }
 f.setBucketID(response.ID)
 return nil
@@ -649,7 +649,7 @@ func (f *Fs) Rmdir() error {
 return f.shouldRetry(resp, err)
 })
 if err != nil {
-return fmt.Errorf("Failed to delete bucket: %v", err)
+return errors.Wrap(err, "failed to delete bucket")
 }
 f.clearBucketID()
 f.clearUploadURL()
@@ -677,7 +677,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
 return f.shouldRetry(resp, err)
 })
 if err != nil {
-return fmt.Errorf("Failed to delete %q: %v", Name, err)
+return errors.Wrapf(err, "failed to delete %q", Name)
 }
 return nil
 }
@@ -811,7 +811,7 @@ func (o *Object) readMetaData() (err error) {
 return err
 }
 if info == nil {
-return fmt.Errorf("Object %q not found", o.remote)
+return errors.Errorf("object %q not found", o.remote)
 }
 return o.decodeMetaData(info)
 }
@@ -905,14 +905,14 @@ func (file *openFile) Close() (err error) {

 // Check to see we read the correct number of bytes
 if file.o.Size() != file.bytes {
-return fmt.Errorf("Object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
+return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
 }

 // Check the SHA1
 receivedSHA1 := file.resp.Header.Get(sha1Header)
 calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
 if receivedSHA1 != calculatedSHA1 {
-return fmt.Errorf("Object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
+return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
 }

 return nil
@@ -934,7 +934,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
 return o.fs.shouldRetry(resp, err)
 })
 if err != nil {
-return nil, fmt.Errorf("Failed to open for download: %v", err)
+return nil, errors.Wrap(err, "failed to open for download")
 }

 // Parse the time out of the headers if possible
@@ -1015,7 +1015,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
 return err
 }
 if n != size {
-return fmt.Errorf("Read %d bytes expecting %d", n, size)
+return errors.Errorf("read %d bytes expecting %d", n, size)
 }
 calculatedSha1 = fmt.Sprintf("%x", hash.Sum(nil))

@@ -1139,7 +1139,7 @@ func (o *Object) Remove() error {
 return o.fs.shouldRetry(resp, err)
 })
 if err != nil {
-return fmt.Errorf("Failed to delete file: %v", err)
+return errors.Wrap(err, "failed to delete file")
 }
 return nil
 }
@@ -4,12 +4,12 @@ package dircache
 // _methods are called without the lock

 import (
-"fmt"
 "log"
 "strings"
 "sync"

 "github.com/ncw/rclone/fs"
+"github.com/pkg/errors"
 )

 // DirCache caches paths to directory IDs and vice versa
@@ -159,7 +159,7 @@ func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error
 if create {
 pathID, err = dc.fs.CreateDir(parentPathID, leaf)
 if err != nil {
-return "", fmt.Errorf("Failed to make directory: %v", err)
+return "", errors.Wrap(err, "failed to make directory")
 }
 } else {
 return "", fs.ErrorDirNotFound
@@ -240,13 +240,13 @@ func (dc *DirCache) RootParentID() (string, error) {
 dc.mu.Lock()
 defer dc.mu.Unlock()
 if !dc.foundRoot {
-return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
+return "", errors.New("internal error: RootID() called before FindRoot")
 }
 if dc.rootParentID == "" {
-return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
+return "", errors.New("internal error: didn't find rootParentID")
 }
 if dc.rootID == dc.trueRootID {
-return "", fmt.Errorf("Is root directory")
+return "", errors.New("is root directory")
 }
 return dc.rootParentID, nil
 }
@@ -24,6 +24,7 @@ import (
 "github.com/ncw/rclone/fs"
 "github.com/ncw/rclone/oauthutil"
 "github.com/ncw/rclone/pacer"
+"github.com/pkg/errors"
 "github.com/spf13/pflag"
 )

@@ -220,7 +221,7 @@ OUTER:
 return shouldRetry(err)
 })
 if err != nil {
-return false, fmt.Errorf("Couldn't list directory: %s", err)
+return false, errors.Wrap(err, "couldn't list directory")
 }
 for _, item := range files.Items {
 if fn(item) {
@@ -253,7 +254,7 @@ func (f *Fs) parseExtensions(extensions string) error {
 for _, extension := range strings.Split(extensions, ",") {
 extension = strings.ToLower(strings.TrimSpace(extension))
 if _, found := extensionToMimeType[extension]; !found {
-return fmt.Errorf("Couldn't find mime type for extension %q", extension)
+return errors.Errorf("couldn't find mime type for extension %q", extension)
 }
 found := false
 for _, existingExtension := range f.extensions {
@@ -272,10 +273,10 @@ func (f *Fs) parseExtensions(extensions string) error {
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, path string) (fs.Fs, error) {
 if !isPowerOfTwo(int64(chunkSize)) {
-return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
+return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
 }
 if chunkSize < 256*1024 {
-return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
+return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
 }

 oAuthClient, _, err := oauthutil.NewClient(name, driveConfig)
@@ -298,7 +299,7 @@ func NewFs(name, path string) (fs.Fs, error) {
 f.client = oAuthClient
 f.svc, err = drive.New(f.client)
 if err != nil {
-return nil, fmt.Errorf("Couldn't create Drive client: %s", err)
+return nil, errors.Wrap(err, "couldn't create Drive client")
 }

 // Read About so we know the root path
@@ -307,7 +308,7 @@ func NewFs(name, path string) (fs.Fs, error) {
 return shouldRetry(err)
 })
 if err != nil {
-return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
+return nil, errors.Wrap(err, "couldn't read info about Drive")
 }

 f.dirCache = dircache.New(root, f.about.RootFolderId, f)
@@ -602,7 +603,7 @@ func (f *Fs) Rmdir() error {
 return err
 }
 if len(children.Items) > 0 {
-return fmt.Errorf("Directory not empty: %#v", children.Items)
+return errors.Errorf("directory not empty: %#v", children.Items)
 }
 // Delete the directory if it isn't the root
 if f.root != "" {
@@ -643,7 +644,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 return nil, fs.ErrorCantCopy
 }
 if srcObj.isDocument {
-return nil, fmt.Errorf("Can't copy a Google document")
+return nil, errors.New("can't copy a Google document")
 }

 o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
@@ -671,7 +672,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // result of List()
 func (f *Fs) Purge() error {
 if f.root == "" {
-return fmt.Errorf("Can't purge root directory")
+return errors.New("can't purge root directory")
 }
 err := f.dirCache.FindRoot(false)
 if err != nil {
@@ -708,7 +709,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 return nil, fs.ErrorCantMove
 }
 if srcObj.isDocument {
-return nil, fmt.Errorf("Can't move a Google document")
+return nil, errors.New("can't move a Google document")
 }

 // Temporary FsObject under construction
@@ -857,7 +858,7 @@ func (o *Object) readMetaData() (err error) {
 }
 if !found {
 fs.Debug(o, "Couldn't find object")
-return fmt.Errorf("Couldn't find object")
+return errors.New("couldn't find object")
 }
 return nil
 }
@@ -914,7 +915,7 @@ func (o *Object) Storable() bool {
 // using the method passed in
 func (o *Object) httpResponse(method string) (res *http.Response, err error) {
 if o.url == "" {
-return nil, fmt.Errorf("Forbidden to download - check sharing permission")
+return nil, errors.New("forbidden to download - check sharing permission")
 }
 req, err := http.NewRequest(method, o.url, nil)
 if err != nil {
@@ -970,7 +971,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
 }
 if res.StatusCode != 200 {
 _ = res.Body.Close() // ignore error
-return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
+return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
 }
 // If it is a document, update the size with what we are
 // reading as it can change from the HEAD in the listing to
@@ -991,7 +992,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
 size := src.Size()
 modTime := src.ModTime()
 if o.isDocument {
-return fmt.Errorf("Can't update a google document")
+return errors.New("can't update a google document")
 }
 updateInfo := &drive.File{
 Id: o.id,
@@ -1025,7 +1026,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
 // Remove an object
 func (o *Object) Remove() error {
 if o.isDocument {
-return fmt.Errorf("Can't delete a google document")
+return errors.New("can't delete a google document")
 }
 var err error
 err = o.fs.pacer.Call(func() (bool, error) {
@@ -1,9 +1,9 @@
 package drive

 import (
-"fmt"
 "testing"

+"github.com/pkg/errors"
 "github.com/stretchr/testify/assert"
 "google.golang.org/api/drive/v2"
 )
@@ -17,11 +17,15 @@ func TestInternalParseExtensions(t *testing.T) {
 {"doc", []string{"doc"}, nil},
 {" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
 {"docx,svg,Docx", []string{"docx", "svg"}, nil},
-{"docx,potato,docx", []string{"docx"}, fmt.Errorf(`Couldn't find mime type for extension "potato"`)},
+{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
 } {
 f := new(Fs)
 gotErr := f.parseExtensions(test.in)
-assert.Equal(t, test.wantErr, gotErr)
+if test.wantErr == nil {
+assert.NoError(t, gotErr)
+} else {
+assert.EqualError(t, gotErr, test.wantErr.Error())
+}
 assert.Equal(t, test.want, f.extensions)
 }

@@ -21,6 +21,7 @@ import (
 "strconv"

 "github.com/ncw/rclone/fs"
+"github.com/pkg/errors"
 "google.golang.org/api/drive/v2"
 "google.golang.org/api/googleapi"
 )
@@ -138,7 +139,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
 if err != nil {
 return 0, err
 }
-return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
+return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
 }
 Range := res.Header.Get("Range")
 if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
@@ -147,7 +148,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
 return start, nil
 }
 }
-return 0, fmt.Errorf("unable to parse range %q", Range)
+return 0, errors.Errorf("unable to parse range %q", Range)
 }

 // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
@@ -9,7 +9,6 @@ File system is case insensitive

 import (
 "crypto/md5"
-"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -21,6 +20,7 @@ import (

 "github.com/ncw/rclone/fs"
 "github.com/ncw/rclone/oauthutil"
+"github.com/pkg/errors"
 "github.com/spf13/pflag"
 "github.com/stacktic/dropbox"
 )
@@ -148,7 +148,7 @@ func newDropbox(name string) (*dropbox.Dropbox, error) {
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 if uploadChunkSize > maxUploadChunkSize {
-return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
+return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
 }
 db, err := newDropbox(name)
 if err != nil {
@@ -239,7 +239,7 @@ func strip(path, root string) (string, error) {
 }
 lowercase := strings.ToLower(path)
 if !strings.HasPrefix(lowercase, root) {
-return "", fmt.Errorf("Path %q is not under root %q", path, root)
+return "", errors.Errorf("path %q is not under root %q", path, root)
 }
 return path[len(root):], nil
 }
@@ -267,11 +267,11 @@ func (f *Fs) list(out fs.ListOpts, dir string) {
 for {
 deltaPage, err := f.db.Delta(cursor, root)
 if err != nil {
-out.SetError(fmt.Errorf("Couldn't list: %s", err))
+out.SetError(errors.Wrap(err, "couldn't list"))
 return
 }
 if deltaPage.Reset && cursor != "" {
-err = errors.New("Unexpected reset during listing")
+err = errors.New("unexpected reset during listing")
 out.SetError(err)
 break
 }
@@ -368,7 +368,7 @@ func (f *Fs) listOneLevel(out fs.ListOpts, dir string) {
 }
 entry, err := f.db.Metadata(root, true, false, "", "", metadataLimit)
 if err != nil {
-out.SetError(fmt.Errorf("Couldn't list single level: %s", err))
+out.SetError(errors.Wrap(err, "couldn't list single level"))
 return
 }
 for i := range entry.Contents {
@@ -448,7 +448,7 @@ func (f *Fs) Mkdir() error {
 if entry.IsDir {
 return nil
 }
-return fmt.Errorf("%q already exists as file", f.root)
+return errors.Errorf("%q already exists as file", f.root)
 }
 _, err = f.db.CreateFolder(f.slashRoot)
 return err
@@ -463,7 +463,7 @@ func (f *Fs) Rmdir() error {
 return err
 }
 if len(entry.Contents) != 0 {
-return errors.New("Directory not empty")
+return errors.New("directory not empty")
 }
 return f.Purge()
 }
@@ -499,7 +499,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 dstPath := dstObj.remotePath()
 entry, err := f.db.Copy(srcPath, dstPath, false)
 if err != nil {
-return nil, fmt.Errorf("Copy failed: %s", err)
+return nil, errors.Wrap(err, "copy failed")
 }
 dstObj.setMetadataFromEntry(entry)
 return dstObj, nil
@@ -542,7 +542,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 dstPath := dstObj.remotePath()
 entry, err := f.db.Move(srcPath, dstPath)
 if err != nil {
-return nil, fmt.Errorf("Move failed: %s", err)
+return nil, errors.Wrap(err, "move failed")
 }
 dstObj.setMetadataFromEntry(entry)
 return dstObj, nil
@@ -571,7 +571,7 @@ func (f *Fs) DirMove(src fs.Fs) error {
 // Do the move
 _, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
 if err != nil {
-return fmt.Errorf("MoveDir failed: %v", err)
+return errors.Wrap(err, "MoveDir failed")
 }
 return nil
 }
@@ -625,7 +625,7 @@ func (o *Object) readEntry() (*dropbox.Entry, error) {
 entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
 if err != nil {
 fs.Debug(o, "Error reading file: %s", err)
-return nil, fmt.Errorf("Error reading file: %s", err)
+return nil, errors.Wrap(err, "error reading file")
 }
 return entry, nil
 }
@@ -717,7 +717,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
 }
 entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
 if err != nil {
-return fmt.Errorf("Upload failed: %s", err)
+return errors.Wrap(err, "upload failed")
 }
 o.setMetadataFromEntry(entry)
 return nil
@@ -1,8 +1,9 @@
 package fs

 import (
-"fmt"
 "io"
+
+"github.com/pkg/errors"
 )

 // asyncReader will do async read-ahead from the input reader
@@ -29,13 +30,13 @@ type asyncReader struct
 // When done use Close to release the buffers and close the supplied input.
 func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
 if size <= 0 {
-return nil, fmt.Errorf("buffer size too small")
+return nil, errors.New("buffer size too small")
 }
 if buffers <= 0 {
-return nil, fmt.Errorf("number of buffers too small")
+return nil, errors.New("number of buffers too small")
 }
 if rd == nil {
-return nil, fmt.Errorf("nil reader supplied")
+return nil, errors.New("nil reader supplied")
 }
 a := &asyncReader{}
 a.init(rd, buffers, size)

fs/config.go (19 changes)
@@ -26,6 +26,7 @@ import (

 "github.com/Unknwon/goconfig"
 "github.com/mreiferson/go-httpclient"
+"github.com/pkg/errors"
 "github.com/spf13/pflag"
 "golang.org/x/crypto/nacl/secretbox"
 "golang.org/x/text/unicode/norm"
@@ -128,7 +129,7 @@ func (x SizeSuffix) String() string {
 // Set a SizeSuffix
 func (x *SizeSuffix) Set(s string) error {
 if len(s) == 0 {
-return fmt.Errorf("Empty string")
+return errors.New("empty string")
 }
 if strings.ToLower(s) == "off" {
 *x = -1
@@ -150,7 +151,7 @@ func (x *SizeSuffix) Set(s string) error {
 case 'g', 'G':
 multiplier = 1 << 30
 default:
-return fmt.Errorf("Bad suffix %q", suffix)
+return errors.Errorf("bad suffix %q", suffix)
 }
 s = s[:len(s)-suffixLen]
 value, err := strconv.ParseFloat(s, 64)
@@ -158,7 +159,7 @@ func (x *SizeSuffix) Set(s string) error {
 return err
 }
 if value < 0 {
-return fmt.Errorf("Size can't be negative %q", s)
+return errors.Errorf("size can't be negative %q", s)
 }
 value *= multiplier
 *x = SizeSuffix(value)
@@ -402,7 +403,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
 break
 }
 if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
-return nil, fmt.Errorf("Unsupported configuration encryption. Update rclone for support.")
+return nil, errors.New("unsupported configuration encryption - update rclone for support")
 }
 return goconfig.LoadFromReader(bytes.NewBuffer(b))
 }
@@ -411,10 +412,10 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
 dec := base64.NewDecoder(base64.StdEncoding, r)
 box, err := ioutil.ReadAll(dec)
 if err != nil {
-return nil, fmt.Errorf("Failed to load base64 encoded data: %v", err)
+return nil, errors.Wrap(err, "failed to load base64 encoded data")
 }
 if len(box) < 24+secretbox.Overhead {
-return nil, fmt.Errorf("Configuration data too short")
+return nil, errors.New("Configuration data too short")
 }
 envpw := os.Getenv("RCLONE_CONFIG_PASS")

@@ -431,7 +432,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
 }
 if len(configKey) == 0 {
 if !*AskPassword {
-return nil, fmt.Errorf("Unable to decrypt configuration and not allowed to ask for password. Set RCLONE_CONFIG_PASS to your configuration password.")
+return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
 }
 getPassword("Enter configuration password:")
 }
@@ -479,7 +480,7 @@ func getPassword(q string) {
 // zero after trimming+normalization, an error is returned.
 func setPassword(password string) error {
 if !utf8.ValidString(password) {
-return fmt.Errorf("Password contains invalid utf8 characters")
+return errors.New("password contains invalid utf8 characters")
 }
 // Remove leading+trailing whitespace
 password = strings.TrimSpace(password)
@@ -487,7 +488,7 @@ func setPassword(password string) error {
 // Normalize to reduce weird variations.
 password = norm.NFKC.String(password)
 if len(password) == 0 {
-return fmt.Errorf("No characters in password")
+return errors.New("no characters in password")
 }
 // Create SHA256 has of the password
 sha := sha256.New()
@@ -12,6 +12,7 @@ import (
 "strings"
 "time"

+"github.com/pkg/errors"
 "github.com/spf13/pflag"
 )

@@ -227,7 +228,7 @@ func NewFilter() (f *Filter, err error) {
 }
 f.ModTimeFrom = time.Now().Add(-duration)
 if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
-return nil, fmt.Errorf("Argument --min-age can't be larger than --max-age")
+return nil, errors.New("argument --min-age can't be larger than --max-age")
 }
 Debug(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
 }
@@ -306,7 +307,7 @@ func (f *Filter) AddRule(rule string) error {
 case strings.HasPrefix(rule, "+ "):
 return f.Add(true, rule[2:])
 }
-return fmt.Errorf("Malformed rule %q", rule)
+return errors.Errorf("malformed rule %q", rule)
 }

 // AddFile adds a single file to the files from list

fs/fs.go (26 changes)
@@ -11,6 +11,8 @@ import (
 "regexp"
 "sort"
 "time"
+
+"github.com/pkg/errors"
 )

 // Constants
@@ -29,17 +31,17 @@ var (
 // Filesystem registry
 fsRegistry []*RegInfo
 // ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
-ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
-ErrorCantPurge = fmt.Errorf("Can't purge directory")
-ErrorCantCopy = fmt.Errorf("Can't copy object - incompatible remotes")
-ErrorCantMove = fmt.Errorf("Can't move object - incompatible remotes")
-ErrorCantDirMove = fmt.Errorf("Can't move directory - incompatible remotes")
-ErrorDirExists = fmt.Errorf("Can't copy directory - destination already exists")
-ErrorCantSetModTime = fmt.Errorf("Can't set modified time")
-ErrorDirNotFound = fmt.Errorf("Directory not found")
-ErrorLevelNotSupported = fmt.Errorf("Level value not supported")
-ErrorListAborted = fmt.Errorf("List aborted")
-ErrorListOnlyRoot = fmt.Errorf("Can only list from root")
+ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
+ErrorCantPurge = errors.New("can't purge directory")
+ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
+ErrorCantMove = errors.New("can't move object - incompatible remotes")
+ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
+ErrorDirExists = errors.New("can't copy directory - destination already exists")
+ErrorCantSetModTime = errors.New("can't set modified time")
+ErrorDirNotFound = errors.New("directory not found")
+ErrorLevelNotSupported = errors.New("level value not supported")
+ErrorListAborted = errors.New("list aborted")
+ErrorListOnlyRoot = errors.New("can only list from root")
 )

 // RegInfo provides information about a filesystem
@@ -323,7 +325,7 @@ func Find(name string) (*RegInfo, error) {
 return item, nil
 }
 }
-return nil, fmt.Errorf("Didn't find filing system for %q", name)
+return nil, errors.Errorf("didn't find filing system for %q", name)
 }

 // Pattern to match an rclone url

fs/glob.go (17 changes)
@@ -4,9 +4,10 @@ package fs

 import (
 "bytes"
-"fmt"
 "regexp"
 "strings"
+
+"github.com/pkg/errors"
 )

 // globToRegexp converts an rsync style glob to a regexp
@@ -29,7 +30,7 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
 case 2:
 _, _ = re.WriteString(`.*`)
 default:
-return fmt.Errorf("too many stars in %q", glob)
+return errors.Errorf("too many stars in %q", glob)
 }
 }
 consecutiveStars = 0
@@ -72,16 +73,16 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
 _, _ = re.WriteRune(c)
 inBrackets++
 case ']':
-return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
+return nil, errors.Errorf("mismatched ']' in glob %q", glob)
 case '{':
 if inBraces {
-return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
+return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
 }
 inBraces = true
 _, _ = re.WriteRune('(')
 case '}':
 if !inBraces {
-return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
+return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
 }
 _, _ = re.WriteRune(')')
 inBraces = false
@@ -103,15 +104,15 @@ func globToRegexp(glob string) (*regexp.Regexp, error) {
 return nil, err
 }
 if inBrackets > 0 {
-return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
+return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
 }
 if inBraces {
-return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
+return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
 }
 _, _ = re.WriteRune('$')
 result, err := regexp.Compile(re.String())
 if err != nil {
-return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
+return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
 }
 return result, nil
 }
@@ -37,7 +37,7 @@ func TestGlobToRegexp(t *testing.T) {
 {`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
 {`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
 {`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
-{`[a--b]`, `(^|/)`, `Bad glob pattern`},
+{`[a--b]`, `(^|/)`, `bad glob pattern`},
 {`a\*b`, `(^|/)a\*b$`, ``},
 {`a\\b`, `(^|/)a\\b$`, ``},
 } {
@@ -8,6 +8,8 @@ import (
 "hash"
 "io"
 "strings"
+
+"github.com/pkg/errors"
 )

 // HashType indicates a standard hashing algorithm
@@ -15,7 +17,7 @@ type HashType int

 // ErrHashUnsupported should be returned by filesystem,
 // if it is requested to deliver an unsupported hash type.
-var ErrHashUnsupported = fmt.Errorf("hash type not supported")
+var ErrHashUnsupported = errors.New("hash type not supported")

 const (
 // HashMD5 indicates MD5 support
@@ -82,7 +84,7 @@ func (h HashType) String() string {
 // and this function must support all types.
 func hashFromTypes(set HashSet) (map[HashType]hash.Hash, error) {
 if !set.SubsetOf(SupportedHashes) {
-return nil, fmt.Errorf("Requested set %08x contains unknown hash types", int(set))
+return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
 }
 var hashers = make(map[HashType]hash.Hash)
 types := set.Array()
@@ -4,6 +4,8 @@ import (
 "fmt"
 "io"
 "time"
+
+"github.com/pkg/errors"
 )

 // Limited defines a Fs which can only return the Objects passed in
@@ -70,7 +72,7 @@ func (f *Limited) Put(in io.Reader, src ObjectInfo) (Object, error) {
 remote := src.Remote()
 obj := f.NewFsObject(remote)
 if obj == nil {
-return nil, fmt.Errorf("Can't create %q in limited fs", remote)
+return nil, errors.Errorf("can't create %q in limited fs", remote)
 }
 return obj, obj.Update(in, src)
 }
@@ -1,11 +1,11 @@
 package fs

 import (
-"errors"
 "io"
 "testing"
 "time"

+"github.com/pkg/errors"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 )
@@ -17,7 +17,7 @@ func TestListerNew(t *testing.T) {
 assert.Equal(t, MaxLevel, o.level)
 }

-var errNotImpl = errors.New("Not implemented")
+var errNotImpl = errors.New("not implemented")

 type mockObject string

@@ -14,6 +14,8 @@ import (
 "sync/atomic"
 "time"

+"github.com/pkg/errors"
+
 "golang.org/x/text/unicode/norm"
 )

@@ -261,7 +263,7 @@ tryAgain:
 // Verify sizes are the same after transfer
 if src.Size() != dst.Size() {
 Stats.Error()
-err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
+err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
 ErrorLog(dst, "%s", err)
 removeFailedCopy(dst)
 return
@@ -287,7 +289,7 @@ tryAgain:
 ErrorLog(dst, "Failed to read hash: %s", err)
 } else if !HashEquals(srcSum, dstSum) {
 Stats.Error()
-err = fmt.Errorf("Corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
+err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
 ErrorLog(dst, "%s", err)
 removeFailedCopy(dst)
 return
@@ -796,7 +798,7 @@ func Check(fdst, fsrc Fs) error {
 checkerWg.Wait()
 Log(fdst, "%d differences found", Stats.GetErrors())
 if differences > 0 {
-return fmt.Errorf("%d differences found", differences)
+return errors.Errorf("%d differences found", differences)
 }
 return nil
 }
@@ -15,7 +15,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
 import (
 "encoding/base64"
 "encoding/hex"
-"errors"
 "fmt"
 "io"
 "io/ioutil"
@@ -27,13 +26,13 @@ import (
 "strings"
 "time"

+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/oauthutil"
+"github.com/pkg/errors"
 "golang.org/x/oauth2"
 "golang.org/x/oauth2/google"
 "google.golang.org/api/googleapi"
 "google.golang.org/api/storage/v1"
-
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/oauthutil"
 )

 const (
@@ -182,7 +181,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
 func parsePath(path string) (bucket, directory string, err error) {
 parts := matcher.FindStringSubmatch(path)
 if parts == nil {
-err = fmt.Errorf("Couldn't find bucket in storage path %q", path)
+err = errors.Errorf("couldn't find bucket in storage path %q", path)
 } else {
 bucket, directory = parts[1], parts[2]
 directory = strings.Trim(directory, "/")
@@ -193,11 +192,11 @@ func parsePath(path string) (bucket, directory string, err error) {
 func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
 data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
 if err != nil {
-return nil, fmt.Errorf("error opening credentials file: %v", err)
+return nil, errors.Wrap(err, "error opening credentials file")
 }
 conf, err := google.JWTConfigFromJSON(data, storageConfig.Scopes...)
 if err != nil {
-return nil, fmt.Errorf("error processing credentials: %v", err)
+return nil, errors.Wrap(err, "error processing credentials")
 }
 ctxWithSpecialClient := oauthutil.Context()
 return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -245,7 +244,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 f.client = oAuthClient
 f.svc, err = storage.New(f.client)
 if err != nil {
-return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
+return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
 }

 if f.root != "" {
@@ -357,7 +356,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
 func (f *Fs) listFiles(out fs.ListOpts, dir string) {
 defer out.Finished()
 if f.bucket == "" {
-out.SetError(fmt.Errorf("Can't list objects at root - choose a bucket using lsd"))
+out.SetError(errors.New("can't list objects at root - choose a bucket using lsd"))
 return
 }
 // List the objects
@@ -398,7 +397,7 @@ func (f *Fs) listBuckets(out fs.ListOpts, dir string) {
 return
 }
 if f.projectNumber == "" {
-out.SetError(errors.New("Can't list buckets without project number"))
+out.SetError(errors.New("can't list buckets without project number"))
 return
 }
 listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@@ -458,7 +457,7 @@ func (f *Fs) Mkdir() error {
 }

 if f.projectNumber == "" {
-return fmt.Errorf("Can't make bucket without project number")
+return errors.New("can't make bucket without project number")
 }

 bucket := storage.Bucket{
@@ -670,7 +669,7 @@ func (o *Object) Open() (in io.ReadCloser, err error) {
 }
 if res.StatusCode != 200 {
 _ = res.Body.Close() // ignore error
-return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
+return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
 }
 return res.Body, nil
 }
@@ -17,6 +17,7 @@ import (
 "github.com/ncw/rclone/oauthutil"
 "github.com/ncw/rclone/swift"
 swiftLib "github.com/ncw/swift"
+"github.com/pkg/errors"
 "golang.org/x/oauth2"
 )

@@ -118,7 +119,7 @@ func (f *Fs) getCredentials() (err error) {
 }
 defer fs.CheckClose(resp.Body, &err)
 if resp.StatusCode < 200 || resp.StatusCode > 299 {
-return fmt.Errorf("Failed to get credentials: %s", resp.Status)
+return errors.Errorf("failed to get credentials: %s", resp.Status)
 }
 decoder := json.NewDecoder(resp.Body)
 var result credentials
@@ -128,7 +129,7 @@ func (f *Fs) getCredentials() (err error) {
 }
 // fs.Debug(f, "Got credentials %+v", result)
 if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
-return fmt.Errorf("Couldn't read token, result and expired from credentials")
+return errors.New("couldn't read token, result and expired from credentials")
 }
 f.credentials = result
 expires, err := time.Parse(time.RFC3339, result.Expires)
@@ -144,7 +145,7 @@ func (f *Fs) getCredentials() (err error) {
 func NewFs(name, root string) (fs.Fs, error) {
 client, _, err := oauthutil.NewClient(name, oauthConfig)
 if err != nil {
-return nil, fmt.Errorf("Failed to configure Hubic: %v", err)
+return nil, errors.Wrap(err, "failed to configure Hubic")
 }

 f := &Fs{
@@ -161,7 +162,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 }
 err = c.Authenticate()
 if err != nil {
-return nil, fmt.Errorf("Error authenticating swift connection: %v", err)
+return nil, errors.Wrap(err, "error authenticating swift connection")
 }

 // Make inner swift Fs from the connection
@@ -80,7 +80,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 f.root, remote = getDirFile(f.root)
 obj := f.NewFsObject(remote)
 if obj == nil {
-return nil, fmt.Errorf("Failed to make object for %q in %q", remote, f.root)
+return nil, errors.Errorf("failed to make object for %q in %q", remote, f.root)
 }
 // return a Fs Limited to this object
 return fs.NewLimited(f, obj), nil
@@ -368,7 +368,7 @@ func (f *Fs) Purge() error {
 return err
 }
 if !fi.Mode().IsDir() {
-return fmt.Errorf("Can't Purge non directory: %q", f.root)
+return errors.Errorf("can't purge non directory: %q", f.root)
 }
 return os.RemoveAll(f.root)
 }
@@ -400,7 +400,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 return nil, err
 } else if !dstObj.info.Mode().IsRegular() {
 // It isn't a file
-return nil, fmt.Errorf("Can't move file onto non-file")
+return nil, errors.New("can't move file onto non-file")
 }

 // Create destination
@@ -490,7 +490,7 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
 oldsize := o.info.Size()
 err := o.lstat()
 if err != nil {
-return "", errors.Wrap(err, "Hash failed to stat")
+return "", errors.Wrap(err, "hash: failed to stat")
 }

 if !o.info.ModTime().Equal(oldtime) || oldsize != o.info.Size() {
@@ -501,15 +501,15 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
 o.hashes = make(map[fs.HashType]string)
 in, err := os.Open(o.path)
 if err != nil {
-return "", errors.Wrap(err, "Hash failed to open")
+return "", errors.Wrap(err, "hash: failed to open")
 }
 o.hashes, err = fs.HashStream(in)
 closeErr := in.Close()
 if err != nil {
-return "", errors.Wrap(err, "Hash failed to read")
+return "", errors.Wrap(err, "hash: failed to read")
 }
 if closeErr != nil {
-return "", errors.Wrap(closeErr, "Hash failed to close")
+return "", errors.Wrap(closeErr, "hash: failed to close")
 }
 }
 return o.hashes[r], nil
@@ -12,6 +12,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/pkg/errors"
 	"github.com/skratchdot/open-golang/open"
 	"golang.org/x/net/context"
 	"golang.org/x/oauth2"

@@ -58,7 +59,7 @@ func getToken(name string) (*oauth2.Token, error) {
 		return nil, err
 	}
 	if tokenString == "" {
-		return nil, fmt.Errorf("Empty token found - please run rclone config again")
+		return nil, errors.New("empty token found - please run rclone config again")
 	}
 	token := new(oauth2.Token)
 	err = json.Unmarshal([]byte(tokenString), token)

@@ -301,7 +302,7 @@ func Config(id, name string, config *oauth2.Config) error {
 		if authCode != "" {
 			fmt.Printf("Got code\n")
 		} else {
-			return fmt.Errorf("Failed to get code")
+			return errors.New("failed to get code")
 		}
 	} else {
 		// Read the code, and exchange it for a token.

@@ -310,14 +311,14 @@ func Config(id, name string, config *oauth2.Config) error {
 	}
 	token, err := config.Exchange(oauth2.NoContext, authCode)
 	if err != nil {
-		return fmt.Errorf("Failed to get token: %v", err)
+		return errors.Wrap(err, "failed to get token")
 	}
 
 	// Print code if we do automatic retrieval
 	if automatic {
 		result, err := json.Marshal(token)
 		if err != nil {
-			return fmt.Errorf("Failed to marshal token: %v", err)
+			return errors.Wrap(err, "failed to marshal token")
 		}
 		fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
 	}
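The conversion follows one pattern throughout: fmt.Errorf calls that only build a message become errors.New or errors.Errorf with a lower-case message, while fmt.Errorf("...: %v", err) calls that merely append an underlying error become errors.Wrap, which keeps the original error as the cause. A minimal sketch of the three forms; loadToken and configure are hypothetical helpers written only to illustrate the pattern, not code from this commit:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// loadToken is a hypothetical helper showing the errors.New form.
func loadToken(s string) (string, error) {
	if s == "" {
		// was: fmt.Errorf("Empty token found - please run rclone config again")
		return "", errors.New("empty token found - please run rclone config again")
	}
	return s, nil
}

// configure is a hypothetical caller showing the errors.Wrap and errors.Errorf forms.
func configure(name string) error {
	token, err := loadToken(name)
	if err != nil {
		// was: fmt.Errorf("Failed to get token: %v", err) - Wrap keeps err as the cause
		return errors.Wrap(err, "failed to get token")
	}
	if len(token)%4 != 0 {
		// was: fmt.Errorf("Bad token length %d", ...) - Errorf formats but carries no cause
		return errors.Errorf("bad token length %d", len(token))
	}
	return nil
}

func main() {
	// prints: failed to get token: empty token found - please run rclone config again
	fmt.Println(configure(""))
}
```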
@@ -187,7 +187,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	// Get rootID
 	rootInfo, _, err := f.readMetaDataForPath("")
 	if err != nil || rootInfo.ID == "" {
-		return nil, fmt.Errorf("Failed to get root: %v", err)
+		return nil, errors.Wrap(err, "failed to get root")
 	}
 
 	f.dirCache = dircache.New(root, rootInfo.ID, f)

@@ -258,7 +258,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 	// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
 	parent, ok := f.dirCache.GetInv(pathID)
 	if !ok {
-		return "", false, fmt.Errorf("Couldn't find parent ID")
+		return "", false, errors.New("couldn't find parent ID")
 	}
 	path := leaf
 	if parent != "" {

@@ -275,7 +275,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 		return "", false, err
 	}
 	if info.Folder == nil {
-		return "", false, fmt.Errorf("Found file when looking for folder")
+		return "", false, errors.New("found file when looking for folder")
 	}
 	return info.ID, true, nil
 }

@@ -467,7 +467,7 @@ func (f *Fs) deleteObject(id string) error {
 // refuses to do so if it has anything in
 func (f *Fs) purgeCheck(check bool) error {
 	if f.root == "" {
-		return fmt.Errorf("Can't purge root directory")
+		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
 	err := dc.FindRoot(false)

@@ -480,10 +480,10 @@ func (f *Fs) purgeCheck(check bool) error {
 		return err
 	}
 	if item.Folder == nil {
-		return fmt.Errorf("Not a folder")
+		return errors.New("not a folder")
 	}
 	if check && item.Folder.ChildCount != 0 {
-		return fmt.Errorf("Folder not empty")
+		return errors.New("folder not empty")
 	}
 	err = f.deleteObject(rootID)
 	if err != nil {

@@ -533,7 +533,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
 			return err
 		}
 		if status.Status == "failed" || status.Status == "deleteFailed" {
-			return fmt.Errorf("Async operation %q returned %q", status.Operation, status.Status)
+			return errors.Errorf("async operation %q returned %q", status.Operation, status.Status)
 		}
 	} else {
 		var info api.Item

@@ -546,7 +546,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
 		}
 		time.Sleep(1 * time.Second)
 	}
-	return fmt.Errorf("Async operation didn't complete after %v", fs.Config.Timeout)
+	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
 }
 
 // Copy src to this remote using server side copy operations.

@@ -601,7 +601,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	// read location header
 	location := resp.Header.Get("Location")
 	if location == "" {
-		return nil, fmt.Errorf("Didn't receive location header in copy response")
+		return nil, errors.New("didn't receive location header in copy response")
 	}
 
 	// Wait for job to finish

@@ -764,7 +764,7 @@ func (o *Object) Storable() bool {
 // Open an object for read
 func (o *Object) Open() (in io.ReadCloser, err error) {
 	if o.id == "" {
-		return nil, fmt.Errorf("Can't download no id")
+		return nil, errors.New("can't download - no id")
 	}
 	var resp *http.Response
 	opts := rest.Opts{

@@ -834,7 +834,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
 // uploadMultipart uploads a file using multipart upload
 func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
 	if chunkSize%(320*1024) != 0 {
-		return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
+		return errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
 	}
 
 	// Create upload session
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/pkg/errors"
 )
 
 func TestNew(t *testing.T) {

@@ -310,7 +311,7 @@ func TestEndCallZeroConnections(t *testing.T) {
 	}
 }
 
-var errFoo = fmt.Errorf("Foo")
+var errFoo = errors.New("foo")
 
 type dummyPaced struct {
 	retry bool
@@ -6,13 +6,13 @@ package rest
 import (
 	"bytes"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"sync"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/pkg/errors"
 )
 
 // Client contains the info to sustain the API

@@ -43,7 +43,7 @@ func defaultErrorHandler(resp *http.Response) (err error) {
 	if err != nil {
 		return err
 	}
-	return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+	return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
 }
 
 // SetErrorHandler sets the handler to decode an error response when

@@ -102,14 +102,14 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
 	api.mu.RLock()
 	defer api.mu.RUnlock()
 	if opts == nil {
-		return nil, fmt.Errorf("call() called with nil opts")
+		return nil, errors.New("call() called with nil opts")
 	}
 	var url string
 	if opts.Absolute {
 		url = opts.Path
 	} else {
 		if api.rootURL == "" {
-			return nil, fmt.Errorf("RootURL not set")
+			return nil, errors.New("RootURL not set")
 		}
 		url = api.rootURL + opts.Path
 	}
s3/s3.go (6 changed lines)
@@ -14,7 +14,6 @@ What happens if you CTRL-C a multipart upload
 */
 
 import (
-	"errors"
 	"fmt"
 	"io"
 	"net/http"

@@ -36,6 +35,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/swift"
+	"github.com/pkg/errors"
 )
 
 // Register with Fs

@@ -200,7 +200,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
 func s3ParsePath(path string) (bucket, directory string, err error) {
 	parts := matcher.FindStringSubmatch(path)
 	if parts == nil {
-		err = fmt.Errorf("Couldn't parse bucket out of s3 path %q", path)
+		err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
 	} else {
 		bucket, directory = parts[1], parts[2]
 		directory = strings.Trim(directory, "/")

@@ -452,7 +452,7 @@ func (f *Fs) listFiles(out fs.ListOpts, dir string) {
 	defer out.Finished()
 	if f.bucket == "" {
 		// Return no objects at top level list
-		out.SetError(errors.New("Can't list objects at root - choose a bucket using lsd"))
+		out.SetError(errors.New("can't list objects at root - choose a bucket using lsd"))
 		return
 	}
 	// List the objects and directories
@@ -3,7 +3,6 @@ package swift
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"path"

@@ -14,6 +13,7 @@ import (
 
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/swift"
+	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
 )

@@ -132,7 +132,7 @@ var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
 func parsePath(path string) (container, directory string, err error) {
 	parts := matcher.FindStringSubmatch(path)
 	if parts == nil {
-		err = fmt.Errorf("Couldn't find container in swift path %q", path)
+		err = errors.Errorf("couldn't find container in swift path %q", path)
 	} else {
 		container, directory = parts[1], parts[2]
 		directory = strings.Trim(directory, "/")

@@ -318,7 +318,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
 func (f *Fs) listFiles(out fs.ListOpts, dir string) {
 	defer out.Finished()
 	if f.container == "" {
-		out.SetError(errors.New("Can't list objects at root - choose a container using lsd"))
+		out.SetError(errors.New("can't list objects at root - choose a container using lsd"))
 		return
 	}
 	// List the objects
@@ -2,13 +2,14 @@ package src
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
+
+	"github.com/pkg/errors"
 )
 
 //Client struct

@@ -1,9 +1,10 @@
 package src
 
 import (
-	"fmt"
 	"io/ioutil"
 	"net/http"
+
+	"github.com/pkg/errors"
 )
 
 // PerformDelete does the actual delete via DELETE request.

@@ -28,7 +29,7 @@ func (c *Client) PerformDelete(url string) error {
 		if err != nil {
 			return err
 		}
-		return fmt.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
+		return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
 	}
 	return nil
 }

@@ -1,10 +1,11 @@
 package src
 
 import (
-	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
+
+	"github.com/pkg/errors"
 )
 
 // PerformDownload does the actual download via unscoped PUT request.

@@ -27,7 +28,7 @@ func (c *Client) PerformDownload(url string) (out io.ReadCloser, err error) {
 		if err != nil {
 			return nil, err
 		}
-		return nil, fmt.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
+		return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
 	}
 	return resp.Body, err
 }

@@ -1,9 +1,10 @@
 package src
 
 import (
-	"fmt"
 	"io/ioutil"
 	"net/http"
+
+	"github.com/pkg/errors"
 )
 
 // PerformMkdir does the actual mkdir via PUT request.

@@ -27,7 +28,7 @@ func (c *Client) PerformMkdir(url string) (int, string, error) {
 			return 0, "", err
 		}
 		//third parameter is the json error response body
-		return resp.StatusCode, string(body[:]), fmt.Errorf("Create Folder error [%d]: %s", resp.StatusCode, string(body[:]))
+		return resp.StatusCode, string(body[:]), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body[:]))
 	}
 	return resp.StatusCode, "", nil
 }

@@ -3,10 +3,11 @@ package src
 //from yadisk
 
 import (
-	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
+
+	"github.com/pkg/errors"
 )
 
 // PerformUpload does the actual upload via unscoped PUT request.

@@ -30,7 +31,7 @@ func (c *Client) PerformUpload(url string, data io.Reader) (err error) {
 			return err
 		}
 
-		return fmt.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
+		return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
 	}
 	return nil
 }

@@ -407,10 +407,10 @@ func (f *Fs) purgeCheck(check bool) error {
 		var opt yandex.ResourceInfoRequestOptions
 		ResourceInfoResponse, err := f.yd.NewResourceInfoRequest(f.diskRoot, opt).Exec()
 		if err != nil {
-			return fmt.Errorf("Rmdir failed: %s", err)
+			return errors.Wrap(err, "rmdir failed")
 		}
 		if len(ResourceInfoResponse.Embedded.Items) != 0 {
-			return fmt.Errorf("Rmdir failed: Directory not empty")
+			return errors.New("rmdir failed: directory not empty")
 		}
 	}
 	//delete directory
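Because errors.Wrap preserves the wrapped error, callers can still inspect the underlying cause with errors.Cause, something the old fmt.Errorf("...: %v", err) style discarded. A minimal sketch under that assumption, using io.EOF as a stand-in sentinel error; readObject is a hypothetical helper, not code from this commit:

```go
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

// readObject wraps a sentinel error with context, as the converted code now does.
func readObject() error {
	return errors.Wrap(io.EOF, "failed to read object")
}

func main() {
	err := readObject()
	fmt.Println(err)                         // failed to read object: EOF
	fmt.Println(errors.Cause(err) == io.EOF) // true - the original cause survives wrapping
}
```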