diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e58e2d79a..f299a4691 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -382,7 +382,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run
- GO111MODULE=on go get -u github.com/pkg/errors
+ GO111MODULE=on go get -u golang.org/x/crypto
Check in a single commit as above.
diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
index c1dcf80df..58a14427b 100644
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ b/backend/amazonclouddrive/amazonclouddrive.go
@@ -14,6 +14,7 @@ we ignore assets completely!
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -22,7 +23,6 @@ import (
"time"
acd "github.com/ncw/go-acd"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -259,7 +259,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+ return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
}
c := acd.NewClient(oAuthClient)
@@ -292,13 +292,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get endpoints")
+ return nil, fmt.Errorf("failed to get endpoints: %w", err)
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
if err != nil || rootInfo.Id == nil {
- return nil, errors.Wrap(err, "failed to get root")
+ return nil, fmt.Errorf("failed to get root: %w", err)
}
f.trueRootID = *rootInfo.Id
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index 966bf264b..74d5ea0e9 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -10,6 +10,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -24,7 +25,6 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -414,10 +414,10 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
- return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+ return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -459,11 +459,11 @@ const azureStorageEndpoint = "https://storage.azure.com/"
func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) {
var spCredentials servicePrincipalCredentials
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
- return nil, errors.Wrap(err, "error parsing credentials from JSON file")
+ return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
}
oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant)
if err != nil {
- return nil, errors.Wrap(err, "error creating oauth config")
+ return nil, fmt.Errorf("error creating oauth config: %w", err)
}
// Create service principal token for Azure Storage.
@@ -473,7 +473,7 @@ func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []by
spCredentials.Password,
azureStorageEndpoint)
if err != nil {
- return nil, errors.Wrap(err, "error creating service principal token")
+ return nil, fmt.Errorf("error creating service principal token: %w", err)
}
// Wrap token inside a refresher closure.
@@ -526,10 +526,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "azure: chunk size")
+ return nil, fmt.Errorf("azure: chunk size: %w", err)
}
if opt.ListChunkSize > maxListChunkSize {
- return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
+ return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
@@ -538,12 +538,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
- return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
+ return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
- return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
+ return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
@@ -585,11 +585,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
- return nil, errors.Wrapf(err, "Failed to parse credentials")
+ return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
- return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+ return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
@@ -631,12 +631,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
if err != nil {
- return nil, errors.Wrapf(err, "Failed to acquire MSI token")
+ return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
- return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+ return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration {
fs.Debugf(f, "Token refresher called.")
@@ -666,19 +666,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
- return nil, errors.Wrapf(err, "Failed to parse credentials")
+ return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
- return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+ return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse SAS URL")
+ return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
}
// use anonymous credentials in case of sas url
pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
@@ -698,17 +698,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a standard URL.
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
if err != nil {
- return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
+ return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
- return nil, errors.Wrap(err, "error opening service principal credentials file")
+ return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
}
// Create a token refresher from service principal credentials.
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds)
if err != nil {
- return nil, errors.Wrap(err, "failed to create a service principal token")
+ return nil, fmt.Errorf("failed to create a service principal token: %w", err)
}
options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
@@ -1324,7 +1324,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
- return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5)
+ return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
}
return hex.EncodeToString(data), nil
}
@@ -1510,7 +1510,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
- return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
+ return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1536,11 +1536,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to open for download")
+ return nil, fmt.Errorf("failed to open for download: %w", err)
}
err = o.decodeMetaDataFromDownloadResponse(downloadResponse)
if err != nil {
- return nil, errors.Wrap(err, "failed to decode metadata for download")
+ return nil, fmt.Errorf("failed to decode metadata for download: %w", err)
}
in = downloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
@@ -1630,7 +1630,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "deleting archive tier blob before updating")
err = o.Remove(ctx)
if err != nil {
- return errors.Wrap(err, "failed to delete archive blob before updating")
+ return fmt.Errorf("failed to delete archive blob before updating: %w", err)
}
} else {
return errCantUpdateArchiveTierBlobs
@@ -1723,7 +1723,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
- return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier)
+ return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}
// Check if current tier already matches with desired tier
@@ -1739,7 +1739,7 @@ func (o *Object) SetTier(tier string) error {
})
if err != nil {
- return errors.Wrap(err, "Failed to set Blob Tier")
+ return fmt.Errorf("Failed to set Blob Tier: %w", err)
}
// Set access tier on local object also, this typically
diff --git a/backend/azureblob/imds.go b/backend/azureblob/imds.go
index 779e0b118..b23e91d62 100644
--- a/backend/azureblob/imds.go
+++ b/backend/azureblob/imds.go
@@ -13,7 +13,6 @@ import (
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
@@ -95,7 +94,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
- return result, errors.Wrap(err, "MSI is not enabled on this VM")
+ return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
@@ -120,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return result, errors.Wrap(err, "Couldn't read IMDS response")
+ return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -131,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
- return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
+ return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}
return result, nil
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index 32c38211d..b96feb225 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -9,6 +9,7 @@ import (
"bytes"
"context"
"crypto/sha1"
+ "errors"
"fmt"
gohash "hash"
"io"
@@ -19,7 +20,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -366,7 +366,7 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -381,7 +381,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
- return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
+ return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
@@ -414,11 +414,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
- return nil, errors.Wrap(err, "b2: upload cutoff")
+ return nil, fmt.Errorf("b2: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "b2: chunk size")
+ return nil, fmt.Errorf("b2: chunk size: %w", err)
}
if opt.Account == "" {
return nil, errors.New("account not found")
@@ -463,7 +463,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = f.authorizeAccount(ctx)
if err != nil {
- return nil, errors.Wrap(err, "failed to authorize account")
+ return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
@@ -472,7 +472,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
- return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
+ return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
@@ -512,7 +512,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
return f.shouldRetryNoReauth(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to authenticate")
+ return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
@@ -558,7 +558,7 @@ func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUp
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get upload URL")
+ return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
@@ -1048,7 +1048,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
}
}
- return errors.Wrap(err, "failed to create bucket")
+ return fmt.Errorf("failed to create bucket: %w", err)
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
@@ -1083,7 +1083,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to delete bucket")
+ return fmt.Errorf("failed to delete bucket: %w", err)
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
@@ -1124,7 +1124,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
return nil
}
}
- return errors.Wrapf(err, "failed to hide %q", bucketPath)
+ return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
}
return nil
}
@@ -1145,7 +1145,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrapf(err, "failed to delete %q", Name)
+ return fmt.Errorf("failed to delete %q: %w", Name, err)
}
return nil
}
@@ -1364,7 +1364,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return "", errors.Wrap(err, "failed to get download authorization")
+ return "", fmt.Errorf("failed to get download authorization: %w", err)
}
return response.AuthorizationToken, nil
}
@@ -1669,14 +1669,14 @@ func (file *openFile) Close() (err error) {
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
- return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
+ return fmt.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
- return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
+ return fmt.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
@@ -1716,7 +1716,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
return nil, nil, fs.ErrorObjectNotFound
}
- return nil, nil, errors.Wrapf(err, "failed to %s for download", method)
+ return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
}
// NB resp may be Open here - don't return err != nil without closing
diff --git a/backend/b2/upload.go b/backend/b2/upload.go
index 44092dda4..827d33578 100644
--- a/backend/b2/upload.go
+++ b/backend/b2/upload.go
@@ -15,7 +15,6 @@ import (
"strings"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -102,7 +101,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
parts++
}
if parts > maxParts {
- return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+ return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
@@ -185,7 +184,7 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get upload URL")
+ return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
@@ -406,7 +405,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
- return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+ return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
diff --git a/backend/box/box.go b/backend/box/box.go
index 4281a46ca..bde259cea 100644
--- a/backend/box/box.go
+++ b/backend/box/box.go
@@ -14,6 +14,7 @@ import (
"crypto/rsa"
"encoding/json"
"encoding/pem"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -26,13 +27,6 @@ import (
"sync/atomic"
"time"
- "github.com/rclone/rclone/lib/encoder"
- "github.com/rclone/rclone/lib/env"
- "github.com/rclone/rclone/lib/jwtutil"
-
- "github.com/youmark/pkcs8"
-
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -43,9 +37,13 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
+ "github.com/rclone/rclone/lib/encoder"
+ "github.com/rclone/rclone/lib/env"
+ "github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
+ "github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
@@ -93,7 +91,7 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
+ return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
@@ -167,15 +165,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
- return errors.Wrap(err, "get box config")
+ return fmt.Errorf("get box config: %w", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
- return errors.Wrap(err, "get decrypted private key")
+ return fmt.Errorf("get decrypted private key: %w", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
- return errors.Wrap(err, "get claims")
+ return fmt.Errorf("get claims: %w", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -187,11 +185,11 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
- return nil, errors.Wrap(err, "box: failed to read Box config")
+ return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
- return nil, errors.Wrap(err, "box: failed to parse Box config")
+ return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
return boxConfig, nil
}
@@ -199,7 +197,7 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
- return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+ return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &jws.ClaimSet{
@@ -240,12 +238,12 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
- return nil, errors.Wrap(err, "box: extra data included in private key")
+ return nil, errors.New("box: extra data included in private key")
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
- return nil, errors.Wrap(err, "box: failed to decrypt private key")
+ return nil, fmt.Errorf("box: failed to decrypt private key: %w", err)
}
return rsaKey.(*rsa.PrivateKey), nil
@@ -403,7 +401,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.UploadCutoff < minUploadCutoff {
- return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
+ return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
@@ -414,7 +412,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Box")
+ return nil, fmt.Errorf("failed to configure Box: %w", err)
}
}
@@ -613,7 +611,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Entries {
item := &result.Entries[i]
@@ -740,14 +738,14 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
- return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
+ return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
}
if conflict.Conflicts.Type != api.ItemTypeFile {
- return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
+ return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
}
return conflict.Conflicts.ID, nil
}
- return "", errors.Wrap(err, "pre-upload check")
+ return "", fmt.Errorf("pre-upload check: %w", err)
}
return "", nil
}
@@ -856,7 +854,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -900,7 +898,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
- return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+ return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -984,7 +982,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to read user info")
+ return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
@@ -1145,7 +1143,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
})
wg.Wait()
if deleteErrors != 0 {
- return errors.Errorf("failed to delete %d trash items", deleteErrors)
+ return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
@@ -1205,7 +1203,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
return fs.ErrorIsDir
}
if info.Type != api.ItemTypeFile {
- return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+ return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = int64(info.Size)
@@ -1341,7 +1339,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
return err
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
- return errors.Errorf("failed to upload %v - not sure why", o)
+ return fmt.Errorf("failed to upload %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
diff --git a/backend/box/upload.go b/backend/box/upload.go
index a7ad6cc09..c664cfbf5 100644
--- a/backend/box/upload.go
+++ b/backend/box/upload.go
@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -15,7 +16,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
}
}
default:
- return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+ return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
}
err = json.Unmarshal(body, &result)
if err != nil {
- return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
+ return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
}
return result, nil
}
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
- return errors.Wrap(err, "multipart upload create session failed")
+ return fmt.Errorf("multipart upload create session failed: %w", err)
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
- err = errors.Wrap(err, "multipart upload failed to read source")
+ err = fmt.Errorf("multipart upload failed to read source: %w", err)
break outer
}
@@ -238,7 +238,7 @@ outer:
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
if err != nil {
- err = errors.Wrap(err, "multipart upload failed to upload part")
+ err = fmt.Errorf("multipart upload failed to upload part: %w", err)
select {
case errs <- err:
default:
@@ -266,11 +266,11 @@ outer:
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
- return errors.Wrap(err, "multipart upload failed to finalize")
+ return fmt.Errorf("multipart upload failed to finalize: %w", err)
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
- return errors.Errorf("multipart upload failed %v - not sure why", o)
+ return fmt.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index 9962406f7..50dd5ce38 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -5,6 +5,7 @@ package cache
import (
"context"
+ "errors"
"fmt"
"io"
"math"
@@ -19,7 +20,6 @@ import (
"syscall"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -356,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
- return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+ return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -366,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
+ return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
- return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+ return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -401,7 +401,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
- return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+ return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -413,7 +413,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
- return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
+ return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
@@ -434,11 +434,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
+ return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
+ return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -450,7 +450,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
- return nil, errors.Wrapf(err, "failed to start cache db")
+ return nil, fmt.Errorf("failed to start cache db: %w", err)
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -484,12 +484,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
+ return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+ return nil, fmt.Errorf("failed to create temp fs: %w", err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -606,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
- return out, errors.Errorf("error while getting cache stats")
+ return out, fmt.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -633,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
- return out, errors.Errorf("remote is needed")
+ return out, fmt.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -644,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
- return out, errors.Errorf("%s doesn't exist in cache", remote)
+ return out, fmt.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -653,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
- return out, errors.WithMessage(err, "error expiring directory")
+ return out, fmt.Errorf("error expiring directory: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -664,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
- return out, errors.WithMessage(err, "error expiring file")
+ return out, fmt.Errorf("error expiring file: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -685,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
- return nil, errors.Errorf("invalid range: %q", part)
+ return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
- return nil, errors.Errorf("invalid range: %q", part)
+ return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
- return nil, errors.Errorf("invalid range: %q", part)
+ return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
- return nil, errors.Errorf("invalid range: %q", part)
+ return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -757,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
- return nil, errors.Wrap(err, "invalid chunks parameter")
+ return nil, fmt.Errorf("invalid chunks parameter: %w", err)
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
- return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+ return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
- return nil, errors.Errorf("invalid parameter %s=%s", k, v)
+ return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1124,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
- return errors.Errorf("Unknown object type %T", entry)
+ return fmt.Errorf("Unknown object type %T", entry)
}
}
diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go
index 281d92af1..4bea04291 100644
--- a/backend/cache/cache_internal_test.go
+++ b/backend/cache/cache_internal_test.go
@@ -7,6 +7,7 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
goflag "flag"
"fmt"
"io"
@@ -22,7 +23,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -446,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
- return errors.Errorf("%v <> %v", coSize, expectedSize)
+ return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -502,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
- return errors.Errorf("not expected listing /test: %v", li)
+ return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
@@ -512,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
- return errors.Errorf("not expected listing /test/one: %v", li)
+ return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
@@ -522,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
- return errors.Errorf("not expected listing /test/second: %v", li)
+ return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
- return errors.Errorf("not expected name: %v", fi.Name())
+ return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
- return errors.Errorf("not expected remote: %v", di.Remote())
+ return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
- return errors.Errorf("unexpected listing: %v", li)
+ return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
@@ -591,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
- return errors.Errorf("not found /test")
+ return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
- return errors.Errorf("not found /test/one")
+ return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
- return errors.Errorf("not found /test/one/test2")
+ return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -610,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
- return errors.Errorf("not expected listing /test/one: %v", li)
+ return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
- return errors.Errorf("not expected name: %v", fi.Name())
+ return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
- return errors.Errorf("not expected remote: %v", di.Remote())
+ return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
- return errors.Errorf("unexpected listing: %v", li)
+ return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -1062,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
- return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
+ return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1257,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
- waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+ waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1274,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
- waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+ waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}
diff --git a/backend/cache/handle.go b/backend/cache/handle.go
index f39f5f633..a7a1497e1 100644
--- a/backend/cache/handle.go
+++ b/backend/cache/handle.go
@@ -5,6 +5,7 @@ package cache
import (
"context"
+ "errors"
"fmt"
"io"
"path"
@@ -13,7 +14,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -243,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
- return nil, errors.Errorf("chunk not found %v", chunkStart)
+ return nil, fmt.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -323,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
- err = errors.Errorf("cache: unimplemented seek whence %v", whence)
+ err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
diff --git a/backend/cache/object.go b/backend/cache/object.go
index adae08970..8dc072017 100644
--- a/backend/cache/object.go
+++ b/backend/cache/object.go
@@ -5,12 +5,12 @@ package cache
import (
"context"
+ "fmt"
"io"
"path"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -178,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
- err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
+ if err != nil {
+ err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
+ }
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
- err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
+ if err != nil {
+ err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
+ }
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
@@ -253,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
- return errors.Errorf("%v is currently uploading, can't update", o)
+ return fmt.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -292,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
- return errors.Errorf("%v is currently uploading, can't delete", o)
+ return fmt.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)
diff --git a/backend/cache/storage_memory.go b/backend/cache/storage_memory.go
index 8705e9125..8e8a360fc 100644
--- a/backend/cache/storage_memory.go
+++ b/backend/cache/storage_memory.go
@@ -4,12 +4,12 @@
package cache
import (
+ "fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -53,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
- return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
+ return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go
index b6d292989..89af35f87 100644
--- a/backend/cache/storage_persistent.go
+++ b/backend/cache/storage_persistent.go
@@ -17,7 +17,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -120,11 +119,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
- return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
+ return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
- return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
+ return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
}
if b.features.PurgeDb {
b.Purge()
@@ -176,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open bucket (%v)", remote)
+ return fmt.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -184,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
- return errors.Errorf("%v not found", remote)
+ return fmt.Errorf("%v not found", remote)
})
return cd, err
@@ -209,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
- return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
+ return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -226,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
- return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
+ return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -244,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
+ return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
- return errors.Errorf("error during unmarshalling obj: %v", err)
+ return fmt.Errorf("error during unmarshalling obj: %v", err)
}
} else {
- return errors.Errorf("missing cached dir: %v", cachedDir)
+ return fmt.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -269,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
- return errors.Errorf("couldn't open bucket (%v)", string(k))
+ return fmt.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -318,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open bucket (%v)", fp)
+ return fmt.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -378,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
+ return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
- return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
+ return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -393,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
- return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
+ return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
- return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
+ return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
- return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
+ return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -414,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
+ return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -446,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
- return errors.Errorf("couldn't open parent bucket for %v", remote)
+ return fmt.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -455,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
- return errors.Errorf("couldn't find object (%v)", remote)
+ return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
@@ -555,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
- return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
+ return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -733,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
- return errors.Errorf("not found %v-%v", path, offset)
+ return fmt.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -773,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -784,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
- return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+ return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
- return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+ return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -803,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -836,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
- return errors.Errorf("no pending upload found")
+ return fmt.Errorf("no pending upload found")
})
return destPath, err
@@ -847,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
- return errors.Errorf("pending upload (%v) not found %v", remote, err)
+ return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -869,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -899,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
- return errors.Errorf("pending upload (%v) not found %v", remote, err)
+ return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
- return errors.Errorf("pending upload not updated %v", err)
+ return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
- return errors.Errorf("pending upload not updated %v", err)
+ return fmt.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -927,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -942,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
- return errors.Errorf("couldn't bucket for %v", tempBucket)
+ return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
- return errors.Errorf("pending upload (%v) not found %v", remote, err)
+ return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
- return errors.Errorf("pending upload already started %v", remote)
+ return fmt.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -970,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
- return errors.Errorf("pending upload not updated %v", err)
+ return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
- return errors.Errorf("pending upload not updated %v", err)
+ return fmt.Errorf("pending upload not updated %v", err)
}
return nil
@@ -1015,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
- return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+ return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
- return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+ return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}
diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go
index 3d4de1b85..76bed2e6c 100644
--- a/backend/chunker/chunker.go
+++ b/backend/chunker/chunker.go
@@ -8,6 +8,7 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
gohash "hash"
"io"
@@ -21,7 +22,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -290,13 +290,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
baseName, basePath, err := fspath.SplitFs(remote)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+ return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
if err != fs.ErrorIsFile && err != nil {
- return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
+ return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
@@ -386,7 +386,7 @@ type Fs struct {
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
- return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
+ return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
}
if err := f.setMetaFormat(metaFormat); err != nil {
return err
@@ -878,7 +878,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
- return nil, errors.Wrap(err, "can't access")
+ return nil, fmt.Errorf("can't access: %w", err)
}
var (
@@ -927,7 +927,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
case fs.ErrorDirNotFound:
entries = nil
default:
- return nil, errors.Wrap(err, "can't detect composite file")
+ return nil, fmt.Errorf("can't detect composite file: %w", err)
}
if f.useNoRename {
@@ -1067,7 +1067,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
- return errors.Wrap(err, "invalid metadata")
+ return fmt.Errorf("invalid metadata: %w", err)
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
return errors.New("metadata doesn't match file size")
@@ -1132,7 +1132,7 @@ func (f *Fs) put(
// Perform consistency checks
if err := f.forbidChunk(src, remote); err != nil {
- return nil, errors.Wrap(err, action+" refused")
+ return nil, fmt.Errorf("%s refused: %w", action, err)
}
if target == nil {
// Get target object with a quick directory scan
@@ -1146,7 +1146,7 @@ func (f *Fs) put(
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
- return nil, errors.Wrap(err, "refusing to "+action)
+ return nil, fmt.Errorf("refusing to %s: %w", action, err)
}
}
@@ -1564,7 +1564,7 @@ func (f *Fs) Hashes() hash.Set {
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if err := f.forbidChunk(dir, dir); err != nil {
- return errors.Wrap(err, "can't mkdir")
+ return fmt.Errorf("can't mkdir: %w", err)
}
return f.base.Mkdir(ctx, dir)
}
@@ -1633,7 +1633,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
// to corrupt file in hard mode. Hence, refuse to Remove, too.
- return errors.Wrap(err, "refuse to corrupt")
+ return fmt.Errorf("refuse to corrupt: %w", err)
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
@@ -1661,12 +1661,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// copyOrMove implements copy or move
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
if err := f.forbidChunk(o, remote); err != nil {
- return nil, errors.Wrapf(err, "can't %s", opName)
+ return nil, fmt.Errorf("can't %s: %w", opName, err)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
- return nil, errors.Wrapf(err, "can't %s this file", opName)
+ return nil, fmt.Errorf("can't %s this file: %w", opName, err)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
@@ -2163,7 +2163,7 @@ func (o *Object) UnWrap() fs.Object {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
- return nil, errors.Wrap(err, "can't open")
+ return nil, fmt.Errorf("can't open: %w", err)
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
diff --git a/backend/compress/compress.go b/backend/compress/compress.go
index 6fc04a8bd..c7b3a4006 100644
--- a/backend/compress/compress.go
+++ b/backend/compress/compress.go
@@ -10,6 +10,7 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -21,7 +22,6 @@ import (
"github.com/buengese/sgzip"
"github.com/gabriel-vasile/mimetype"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -143,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
- return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+ return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
}
// Strip trailing slashes if they exist in rpath
@@ -158,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
}
if err != nil && err != fs.ErrorIsFile {
- return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
+ return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}
// Create the wrapping fs
@@ -304,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
- return nil, errors.Errorf("Unknown object type %T", entry)
+ return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -410,7 +410,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
srcHash := hasher.Sums()[ht]
dstHash, err := o.Hash(ctx, ht)
if err != nil {
- return errors.Wrap(err, "failed to read destination hash")
+ return fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
@@ -418,7 +418,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
- return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
+ return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
}
return nil
}
@@ -462,10 +462,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
- return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
+ return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
- return nil, errors.Wrap(err, "Failed to write temporary local file")
+ return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -714,7 +714,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Could remove original object")
+ return nil, fmt.Errorf("Could remove original object: %w", err)
}
}
@@ -723,7 +723,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
- return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
+ return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
}
newObj.Object = wrapObj
}
diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go
index de0bbd055..161ccc818 100644
--- a/backend/crypt/cipher.go
+++ b/backend/crypt/cipher.go
@@ -7,6 +7,7 @@ import (
gocipher "crypto/cipher"
"crypto/rand"
"encoding/base32"
+ "errors"
"fmt"
"io"
"strconv"
@@ -15,7 +16,6 @@ import (
"time"
"unicode/utf8"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -94,7 +94,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
- err = errors.Errorf("Unknown file name encryption mode %q", s)
+ err = fmt.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}
@@ -580,7 +580,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
func (n *nonce) fromReader(in io.Reader) error {
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
- return errors.Wrap(err, "short read of nonce")
+ return fmt.Errorf("short read of nonce: %w", err)
}
return nil
}
@@ -956,7 +956,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
- return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
+ return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
}
// Set the file handle
diff --git a/backend/crypt/cipher_test.go b/backend/crypt/cipher_test.go
index dbe0e42be..41c37b2a8 100644
--- a/backend/crypt/cipher_test.go
+++ b/backend/crypt/cipher_test.go
@@ -4,13 +4,13 @@ import (
"bytes"
"context"
"encoding/base32"
+ "errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
@@ -637,7 +637,7 @@ func (r *randomSource) Read(p []byte) (n int, err error) {
func (r *randomSource) Write(p []byte) (n int, err error) {
for i := range p {
if p[i] != r.next() {
- return 0, errors.Errorf("Error in stream at %d", r.counter)
+ return 0, fmt.Errorf("Error in stream at %d", r.counter)
}
}
return len(p), nil
diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go
index 57f2858bd..56a3dcccd 100644
--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -3,13 +3,13 @@ package crypt
import (
"context"
+ "errors"
"fmt"
"io"
"path"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -131,18 +131,18 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
- return nil, errors.Wrap(err, "failed to decrypt password")
+ return nil, fmt.Errorf("failed to decrypt password: %w", err)
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
- return nil, errors.Wrap(err, "failed to decrypt password2")
+ return nil, fmt.Errorf("failed to decrypt password2: %w", err)
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
- return nil, errors.Wrap(err, "failed to make cipher")
+ return nil, fmt.Errorf("failed to make cipher: %w", err)
}
return cipher, nil
}
@@ -192,7 +192,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
}
}
if err != fs.ErrorIsFile && err != nil {
- return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
+ return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
Fs: wrappedFs,
@@ -300,7 +300,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
- return nil, errors.Errorf("Unknown object type %T", entry)
+ return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -406,7 +406,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
var dstHash string
dstHash, err = o.Hash(ctx, ht)
if err != nil {
- return nil, errors.Wrap(err, "failed to read destination hash")
+ return nil, fmt.Errorf("failed to read destination hash: %w", err)
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
@@ -415,7 +415,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
- return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+ return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -616,24 +616,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
- return "", errors.Wrap(err, "failed to open src")
+ return "", fmt.Errorf("failed to open src: %w", err)
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
- return "", errors.Wrap(err, "failed to make encrypter")
+ return "", fmt.Errorf("failed to make encrypter: %w", err)
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
- return "", errors.Wrap(err, "failed to make hasher")
+ return "", fmt.Errorf("failed to make hasher: %w", err)
}
_, err = io.Copy(m, out)
if err != nil {
- return "", errors.Wrap(err, "failed to hash data")
+ return "", fmt.Errorf("failed to hash data: %w", err)
}
return m.Sums()[hashType], nil
@@ -652,12 +652,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
- return "", errors.Wrap(err, "failed to open object to read nonce")
+ return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
d, err := f.cipher.newDecrypter(in)
if err != nil {
_ = in.Close()
- return "", errors.Wrap(err, "failed to open object to read nonce")
+ return "", fmt.Errorf("failed to open object to read nonce: %w", err)
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
@@ -676,7 +676,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
- return "", errors.Wrap(err, "failed to close nonce read")
+ return "", fmt.Errorf("failed to close nonce read: %w", err)
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
@@ -795,7 +795,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
- return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
+ return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
}
out = append(out, fileName)
}
diff --git a/backend/crypt/pkcs7/pkcs7.go b/backend/crypt/pkcs7/pkcs7.go
index e6d9d0fd9..db604ae4b 100644
--- a/backend/crypt/pkcs7/pkcs7.go
+++ b/backend/crypt/pkcs7/pkcs7.go
@@ -4,7 +4,7 @@
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
-import "github.com/pkg/errors"
+import "errors"
// Errors Unpad can return
var (
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
old mode 100755
new mode 100644
index 5be87962b..01e3fd6a4
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -11,6 +11,7 @@ import (
"bytes"
"context"
"crypto/tls"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -25,7 +26,6 @@ import (
"text/template"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
@@ -188,7 +188,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
- return nil, errors.Wrap(err, "couldn't parse config into struct")
+ return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
switch config.State {
@@ -226,7 +226,7 @@ func init() {
case "teamdrive_config":
f, err := newFs(ctx, name, "", m)
if err != nil {
- return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
+ return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
@@ -755,7 +755,7 @@ func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (in
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
if err != nil {
- return "", errors.Wrap(err, "couldn't find root directory ID")
+ return "", fmt.Errorf("couldn't find root directory ID: %w", err)
}
return info.Id, nil
}
@@ -882,7 +882,7 @@ OUTER:
return f.shouldRetry(ctx, err)
})
if err != nil {
- return false, errors.Wrap(err, "couldn't list directory")
+ return false, fmt.Errorf("couldn't list directory: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
@@ -904,7 +904,7 @@ OUTER:
}
item, err = f.resolveShortcut(ctx, item)
if err != nil {
- return false, errors.Wrap(err, "list")
+ return false, fmt.Errorf("list: %w", err)
}
}
// Check the case of items is correct since
@@ -965,7 +965,7 @@ func fixMimeType(mimeTypeIn string) string {
mimeTypeOut = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
- panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
+ panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
}
@@ -1000,7 +1000,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
mt := mime.TypeByExtension(extension)
if mt == "" {
- return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
+ return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
}
if !containsString(extensions, extension) {
extensions = append(extensions, extension)
@@ -1027,7 +1027,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
if err != nil {
- return nil, errors.Wrap(err, "error processing credentials")
+ return nil, fmt.Errorf("error processing credentials: %w", err)
}
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
@@ -1044,19 +1044,19 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
- return nil, errors.Wrap(err, "error opening service account credentials file")
+ return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
if err != nil {
- return nil, errors.Wrap(err, "failed to create oauth client from service account")
+ return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
- return nil, errors.Wrap(err, "failed to create oauth client")
+ return nil, fmt.Errorf("failed to create oauth client: %w", err)
}
}
@@ -1065,10 +1065,10 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if !isPowerOfTwo(int64(cs)) {
- return errors.Errorf("%v isn't a power of two", cs)
+ return fmt.Errorf("%v isn't a power of two", cs)
}
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -1106,16 +1106,16 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
- return nil, errors.Wrap(err, "drive: upload cutoff")
+ return nil, fmt.Errorf("drive: upload cutoff: %w", err)
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "drive: chunk size")
+ return nil, fmt.Errorf("drive: chunk size: %w", err)
}
oAuthClient, err := createOAuthClient(ctx, opt, name, m)
if err != nil {
- return nil, errors.Wrap(err, "drive: failed when making oauth client")
+ return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
}
root, err := parseDrivePath(path)
@@ -1149,13 +1149,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
- return nil, errors.Wrap(err, "couldn't create Drive client")
+ return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
- return nil, errors.Wrap(err, "couldn't create Drive v2 client")
+ return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
@@ -1180,7 +1180,8 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
if err != nil {
- if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+ var gerr *googleapi.Error
+ if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
@@ -1322,7 +1323,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
t := linkTemplate(exportMimeType)
if t == nil {
- return nil, errors.Errorf("unsupported link type %s", exportMimeType)
+ return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
}
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
if xdgIcon == "" {
@@ -1335,7 +1336,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
info.WebViewLink, info.Name, xdgIcon,
})
if err != nil {
- return nil, errors.Wrap(err, "executing template failed")
+ return nil, fmt.Errorf("executing template failed: %w", err)
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1372,7 +1373,7 @@ func (f *Fs) newObjectWithExportInfo(
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
if err != nil {
- return nil, errors.Wrap(err, "new object")
+ return nil, fmt.Errorf("new object: %w", err)
}
switch {
case info.MimeType == driveFolderType:
@@ -2015,13 +2016,14 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
- if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+ var gerr *googleapi.Error
+ if errors.As(err, &gerr) && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
item.MimeType = shortcutMimeTypeDangling
return item, nil
}
- return nil, errors.Wrap(err, "failed to resolve shortcut")
+ return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
}
// make sure we use the Name, Parents and Trashed from the original item
newItem.Name = item.Name
@@ -2123,10 +2125,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
- return nil, errors.Errorf("No export format found for %q", importMimeType)
+ return nil, fmt.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
- return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
+ return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2194,7 +2196,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
- return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
+ return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
}
// move them into place
for _, info := range infos {
@@ -2210,14 +2212,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return f.shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
+ return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
if err != nil {
- return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
+ return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
}
}
return nil
@@ -2280,7 +2282,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return err
}
if found {
- return errors.Errorf("directory not empty")
+ return fmt.Errorf("directory not empty")
}
}
if root != "" {
@@ -2458,7 +2460,7 @@ func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID strin
return false
})
if err != nil {
- err = errors.Wrap(err, "failed to list directory")
+ err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -2502,7 +2504,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "failed to get Shared Drive info")
+ return fmt.Errorf("failed to get Shared Drive info: %w", err)
}
fs.Debugf(f, "read info from Shared Drive %q", td.Name)
return err
@@ -2525,7 +2527,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get Drive storageQuota")
+ return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
}
q := about.StorageQuota
usage := &fs.Usage{
@@ -2849,7 +2851,7 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
- return errors.Wrap(err, "couldn't convert chunk size to int")
+ return fmt.Errorf("couldn't convert chunk size to int: %w", err)
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
@@ -2886,17 +2888,17 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
if err != nil {
- return errors.Wrap(err, "drive: failed when making oauth client")
+ return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
- return errors.Wrap(err, "couldn't create Drive client")
+ return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
- return errors.Wrap(err, "couldn't create Drive v2 client")
+ return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
}
return nil
@@ -2925,12 +2927,12 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorIsDir {
- return nil, errors.Wrap(err, "can't find source")
+ return nil, fmt.Errorf("can't find source: %w", err)
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
- return nil, errors.Wrap(err, "failed to find source dir")
+ return nil, fmt.Errorf("failed to find source dir: %w", err)
}
isDir = true
} else {
@@ -2947,13 +2949,13 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
} else if err == fs.ErrorIsDir {
err = errors.New("existing directory")
}
- return nil, errors.Wrap(err, "not overwriting shortcut target")
+ return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
}
// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
- return nil, errors.Wrap(err, "shortcut destination failed")
+ return nil, fmt.Errorf("shortcut destination failed: %w", err)
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
@@ -2970,7 +2972,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
return dstFs.shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "shortcut creation failed")
+ return nil, fmt.Errorf("shortcut creation failed: %w", err)
}
if isDir {
return nil, nil
@@ -2990,7 +2992,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err err
return defaultFs.shouldRetry(ctx, err)
})
if err != nil {
- return drives, errors.Wrap(err, "listing Team Drives failed")
+ return drives, fmt.Errorf("listing Team Drives failed: %w", err)
}
drives = append(drives, teamDrives.Drives...)
if teamDrives.NextPageToken == "" {
@@ -3033,7 +3035,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return f.shouldRetry(ctx, err)
})
if err != nil {
- err = errors.Wrap(err, "failed to restore")
+ err = fmt.Errorf("failed to restore: %w", err)
r.Errors++
fs.Errorf(remote, "%v", err)
} else {
@@ -3050,7 +3052,7 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
return false
})
if err != nil {
- err = errors.Wrap(err, "failed to list directory")
+ err = fmt.Errorf("failed to list directory: %w", err)
r.Errors++
fs.Errorf(dir, "%v", err)
}
@@ -3074,10 +3076,10 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
if err != nil {
- return errors.Wrap(err, "couldn't find id")
+ return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
- return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
+ return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3100,7 +3102,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
- return errors.Wrap(err, "copy failed")
+ return fmt.Errorf("copy failed: %w", err)
}
return nil
}
@@ -3299,7 +3301,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if ok {
targetFs, err := cache.Get(ctx, target)
if err != nil {
- return nil, errors.Wrap(err, "couldn't find target")
+ return nil, fmt.Errorf("couldn't find target: %w", err)
}
dstFs, ok = targetFs.(*Fs)
if !ok {
@@ -3338,7 +3340,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
arg = arg[2:]
err = f.copyID(ctx, id, dest)
if err != nil {
- return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
+ return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
}
}
return nil, nil
@@ -3572,11 +3574,11 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
- err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
+ err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {
- return nil, errors.Wrap(err, "open file failed")
+ return nil, fmt.Errorf("open file failed: %w", err)
}
}
return res.Body, nil
@@ -3740,14 +3742,14 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
- return errors.Errorf("can't update google document type without --drive-import-formats")
+ return fmt.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
if importMimeType == "" {
- return errors.Errorf("no import format found for %q", srcMimeType)
+ return fmt.Errorf("no import format found for %q", srcMimeType)
}
if importMimeType != o.documentMimeType {
- return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
+ return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
}
updateInfo.MimeType = importMimeType
diff --git a/backend/drive/drive_internal_test.go b/backend/drive/drive_internal_test.go
index 2f108eb89..cd06d15cd 100644
--- a/backend/drive/drive_internal_test.go
+++ b/backend/drive/drive_internal_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -15,7 +16,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
diff --git a/backend/dropbox/batcher.go b/backend/dropbox/batcher.go
index 098f0840c..96185dde3 100644
--- a/backend/dropbox/batcher.go
+++ b/backend/dropbox/batcher.go
@@ -8,13 +8,13 @@ package dropbox
import (
"context"
+ "errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
@@ -66,7 +66,7 @@ type batcherResponse struct {
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
- return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
+ return nil, fmt.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
@@ -91,7 +91,7 @@ func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.
case "off":
size = 0
default:
- return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
+ return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
@@ -135,7 +135,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return err != nil, err
})
if err != nil {
- return nil, errors.Wrap(err, "batch commit failed")
+ return nil, fmt.Errorf("batch commit failed: %w", err)
}
return batchStatus, nil
}
@@ -180,7 +180,7 @@ func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *f
if err == nil {
err = errors.New("batch didn't complete")
}
- return nil, errors.Wrapf(err, "wait for batch failed after %d tries in %v", try, time.Since(startTime))
+ return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch
@@ -216,13 +216,13 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
case "complete":
complete = batchStatus.Complete
default:
- return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
+ return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
- return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
+ return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
@@ -250,7 +250,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
- resp.err = errors.Errorf("batch upload failed: %s", errorTag)
+ resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
@@ -261,7 +261,7 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
// Report an error if any failed in the batch
if errorTag != "" {
- return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
+ return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
old mode 100755
new mode 100644
index b8c69a3be..11436a327
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -23,6 +23,7 @@ of path_display and all will be well.
import (
"context"
+ "errors"
"fmt"
"io"
"path"
@@ -38,7 +39,6 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -363,24 +363,24 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
- baseErrString := errors.Cause(err).Error()
+ errString := err.Error()
// First check for specific errors
- if strings.Contains(baseErrString, "insufficient_space") {
+ if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
- } else if strings.Contains(baseErrString, "malformed_path") {
+ } else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
- fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+ fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
- if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
+ if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
return fserrors.ShouldRetry(err), err
@@ -389,10 +389,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
- return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+ return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -415,7 +415,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "dropbox: chunk size")
+ return nil, fmt.Errorf("dropbox: chunk size: %w", err)
}
// Convert the old token if it exists. The old token was just
@@ -427,13 +427,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
- return nil, errors.Wrap(err, "NewFS convert token")
+ return nil, fmt.Errorf("NewFS convert token: %w", err)
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
if err != nil {
- return nil, errors.Wrap(err, "failed to configure dropbox")
+ return nil, fmt.Errorf("failed to configure dropbox: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -474,7 +474,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
- return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
+ return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
@@ -551,7 +551,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "get current account failed")
+ return nil, fmt.Errorf("get current account failed: %w", err)
}
switch x := acc.RootInfo.(type) {
case *common.TeamRootInfo:
@@ -559,7 +559,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case *common.UserRootInfo:
f.ns = x.RootNamespaceId
default:
- return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
+ return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
@@ -710,7 +710,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "list continue")
+ return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -784,7 +784,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "list continue")
+ return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -877,7 +877,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "list continue")
+ return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -989,7 +989,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
_, err = f.getDirMetadata(ctx, root)
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
root = f.opt.Enc.FromStandardPath(root)
@@ -1007,7 +1007,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
@@ -1073,7 +1073,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "copy failed")
+ return nil, fmt.Errorf("copy failed: %w", err)
}
// Set the metadata
@@ -1083,7 +1083,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
- return nil, errors.Wrap(err, "copy failed")
+ return nil, fmt.Errorf("copy failed: %w", err)
}
return dstObj, nil
@@ -1134,7 +1134,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "move failed")
+ return nil, fmt.Errorf("move failed: %w", err)
}
// Set the metadata
@@ -1144,7 +1144,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
- return nil, errors.Wrap(err, "move failed")
+ return nil, fmt.Errorf("move failed: %w", err)
}
return dstObj, nil
}
@@ -1252,7 +1252,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "MoveDir failed")
+ return fmt.Errorf("MoveDir failed: %w", err)
}
return nil
@@ -1266,7 +1266,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "about failed")
+ return nil, fmt.Errorf("about failed: %w", err)
}
var total uint64
if q.Allocation != nil {
@@ -1406,7 +1406,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err)
})
if err != nil {
- return "", errors.Wrap(err, "list continue")
+ return "", fmt.Errorf("list continue: %w", err)
}
cursor = changeList.Cursor
var entryType fs.EntryType
@@ -1485,7 +1485,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readMetaData(ctx)
if err != nil {
- return "", errors.Wrap(err, "failed to read hash from metadata")
+ return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.hash, nil
}
@@ -1738,7 +1738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
- return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
+ return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
@@ -1762,7 +1762,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
})
}
if err != nil {
- return errors.Wrap(err, "upload failed")
+ return fmt.Errorf("upload failed: %w", err)
}
// If we haven't received data back from batch upload then fake it
//
diff --git a/backend/fichier/api.go b/backend/fichier/api.go
index 54d4b2cd5..6a1746265 100644
--- a/backend/fichier/api.go
+++ b/backend/fichier/api.go
@@ -2,6 +2,8 @@ package fichier
import (
"context"
+ "errors"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -10,7 +12,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -81,7 +82,7 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't read file info")
+ return nil, fmt.Errorf("couldn't read file info: %w", err)
}
return &file, err
@@ -110,7 +111,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
return doretry || !validToken(&token), err
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't list files")
+ return nil, fmt.Errorf("couldn't list files: %w", err)
}
return &token, nil
@@ -144,7 +145,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't list files")
+ return nil, fmt.Errorf("couldn't list files: %w", err)
}
entries = make([]fs.DirEntry, len(sharedFiles))
@@ -173,7 +174,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't list files")
+ return nil, fmt.Errorf("couldn't list files: %w", err)
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -201,7 +202,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't list folders")
+ return nil, fmt.Errorf("couldn't list folders: %w", err)
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -295,7 +296,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't create folder")
+ return nil, fmt.Errorf("couldn't create folder: %w", err)
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -322,10 +323,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't remove folder")
+ return nil, fmt.Errorf("couldn't remove folder: %w", err)
}
if response.Status != "OK" {
- return nil, errors.Errorf("can't remove folder: %s", response.Message)
+ return nil, fmt.Errorf("can't remove folder: %s", response.Message)
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -352,7 +353,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't remove file")
+ return nil, fmt.Errorf("couldn't remove file: %w", err)
}
// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -379,7 +380,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -404,7 +405,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return response, nil
@@ -432,7 +433,7 @@ func (f *Fs) renameFile(ctx context.Context, url string, newName string) (respon
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't rename file")
+ return nil, fmt.Errorf("couldn't rename file: %w", err)
}
return response, nil
@@ -453,7 +454,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "didnt got an upload node")
+ return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")
@@ -497,7 +498,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't upload file")
+ return nil, fmt.Errorf("couldn't upload file: %w", err)
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -531,7 +532,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't finish file upload")
+ return nil, fmt.Errorf("couldn't finish file upload: %w", err)
}
return response, err
diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go
index 92a41432c..b20f2ed31 100644
--- a/backend/fichier/fichier.go
+++ b/backend/fichier/fichier.go
@@ -2,6 +2,7 @@ package fichier
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -9,7 +10,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -454,10 +454,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
- return nil, errors.Wrap(err, "couldn't rename file")
+ return nil, fmt.Errorf("couldn't rename file: %w", err)
}
if resp.Status != "OK" {
- return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
+ return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
@@ -467,10 +467,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
- return nil, errors.Wrap(err, "couldn't move file")
+ return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
- return nil, errors.Errorf("couldn't move file: %s", resp.Message)
+ return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
}
@@ -503,10 +503,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
- return nil, errors.Wrap(err, "couldn't move file")
+ return nil, fmt.Errorf("couldn't move file: %w", err)
}
if resp.Status != "OK" {
- return nil, errors.Errorf("couldn't move file: %s", resp.Message)
+ return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
diff --git a/backend/fichier/object.go b/backend/fichier/object.go
index d48047035..73723e223 100644
--- a/backend/fichier/object.go
+++ b/backend/fichier/object.go
@@ -2,11 +2,12 @@ package fichier
import (
"context"
+ "errors"
+ "fmt"
"io"
"net/http"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -122,7 +123,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
- return errors.Wrap(err, "failed to remove old version")
+ return fmt.Errorf("failed to remove old version: %w", err)
}
// Replace guts of old object with new one
diff --git a/backend/filefabric/filefabric.go b/backend/filefabric/filefabric.go
index 61a303a23..7f49579b4 100644
--- a/backend/filefabric/filefabric.go
+++ b/backend/filefabric/filefabric.go
@@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/filefabric/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -267,7 +267,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
"pid": rootID,
}, &resp, nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to check path exists")
+ return nil, fmt.Errorf("failed to check path exists: %w", err)
}
if resp.Exists != "y" {
return nil, fs.ErrorObjectNotFound
@@ -308,7 +308,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
"token": "*",
}, &applianceInfo, nil)
if err != nil {
- return errors.Wrap(err, "failed to read appliance version")
+ return fmt.Errorf("failed to read appliance version: %w", err)
}
f.opt.Version = applianceInfo.SoftwareVersionLabel
f.m.Set("version", f.opt.Version)
@@ -349,7 +349,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
"authtoken": f.opt.PermanentToken,
}, &info, nil)
if err != nil {
- return "", errors.Wrap(err, "failed to get session token")
+ return "", fmt.Errorf("failed to get session token: %w", err)
}
refreshed = true
now = now.Add(tokenLifeTime)
@@ -562,7 +562,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
"fi_name": f.opt.Enc.FromStandardName(leaf),
}, &info, nil)
if err != nil {
- return "", errors.Wrap(err, "failed to create directory")
+ return "", fmt.Errorf("failed to create directory: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.Item.ID, nil
@@ -595,7 +595,7 @@ OUTER:
var info api.GetFolderContentsResponse
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
if err != nil {
- return false, errors.Wrap(err, "failed to list directory")
+ return false, fmt.Errorf("failed to list directory: %w", err)
}
for i := range info.Items {
item := &info.Items[i]
@@ -726,7 +726,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
"completedeletion": "n",
}, &info, nil)
if err != nil {
- return errors.Wrap(err, "failed to delete file")
+ return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -763,7 +763,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}, &info, nil)
f.dirCache.FlushDir(dir)
if err != nil {
- return errors.Wrap(err, "failed to remove directory")
+ return fmt.Errorf("failed to remove directory: %w", err)
}
return nil
}
@@ -825,7 +825,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to copy file")
+ return nil, fmt.Errorf("failed to copy file: %w", err)
}
err = dstObj.setMetaData(&info.Item)
if err != nil {
@@ -857,7 +857,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
"taskid": taskID,
}, &info, nil)
if err != nil {
- return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
+ return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
}
if len(info.Tasks) == 0 {
// task has finished
@@ -890,7 +890,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
"fi_name": newLeaf,
}, &info, nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to rename leaf")
+ return nil, fmt.Errorf("failed to rename leaf: %w", err)
}
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
if err != nil {
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
"dir_id": newDirectoryID,
}, &info, nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to move file to new directory")
+ return nil, fmt.Errorf("failed to move file to new directory: %w", err)
}
item = &info.Item
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
@@ -1037,7 +1037,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var info api.EmptyResponse
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
if err != nil {
- return errors.Wrap(err, "failed to empty trash")
+ return fmt.Errorf("failed to empty trash: %w", err)
}
return nil
}
@@ -1164,7 +1164,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
"data": data.String(),
}, &info, nil)
if err != nil {
- return errors.Wrap(err, "failed to update metadata")
+ return fmt.Errorf("failed to update metadata: %w", err)
}
return o.setMetaData(&info.Item)
}
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
if err != nil {
- return errors.Wrap(err, "failed to initialize upload")
+ return fmt.Errorf("failed to initialize upload: %w", err)
}
// Cancel the upload if aborted or it fails
@@ -1290,13 +1290,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err, nil, try)
})
if err != nil {
- return errors.Wrap(err, "failed to upload")
+ return fmt.Errorf("failed to upload: %w", err)
}
if uploader.Success != "y" {
- return errors.Errorf("upload failed")
+ return fmt.Errorf("upload failed")
}
if size > 0 && uploader.FileSize != size {
- return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
+ return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
}
// Now finalize the file
@@ -1308,7 +1308,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
if err != nil {
- return errors.Wrap(err, "failed to finalize upload")
+ return fmt.Errorf("failed to finalize upload: %w", err)
}
finalized = true
diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go
index b5de006e5..11ae2764d 100644
--- a/backend/ftp/ftp.go
+++ b/backend/ftp/ftp.go
@@ -4,6 +4,8 @@ package ftp
import (
"context"
"crypto/tls"
+ "errors"
+ "fmt"
"io"
"net"
"net/textproto"
@@ -14,7 +16,6 @@ import (
"time"
"github.com/jlaffaye/ftp"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -349,7 +350,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return false, nil
})
if err != nil {
- err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
+ err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
}
return c, err
}
@@ -396,8 +397,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
- _, isRegularError := errors.Cause(err).(*textproto.Error)
- if !isRegularError {
+ var tpErr *textproto.Error
+ if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -445,7 +446,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
- return nil, errors.Wrap(err, "NewFS decrypt password")
+ return nil, fmt.Errorf("NewFS decrypt password: %w", err)
}
user := opt.User
if user == "" {
@@ -502,7 +503,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "NewFs")
+ return nil, fmt.Errorf("NewFs: %w", err)
}
f.fGetTime = c.IsGetTimeSupported()
f.fSetTime = c.IsSetTimeSupported()
@@ -520,7 +521,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
_, err := f.NewObject(ctx, remote)
if err != nil {
- if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
+ if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -599,7 +600,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
c, err := f.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "findItem")
+ return nil, fmt.Errorf("findItem: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -643,7 +644,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote)
if err != nil {
- return false, errors.Wrap(err, "dirExists")
+ return false, fmt.Errorf("dirExists: %w", err)
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
@@ -664,7 +665,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "list")
+ return nil, fmt.Errorf("list: %w", err)
}
var listErr error
@@ -702,7 +703,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if len(files) == 0 {
exists, err := f.dirExists(ctx, dir)
if err != nil {
- return nil, errors.Wrap(err, "list")
+ return nil, fmt.Errorf("list: %w", err)
}
if !exists {
return nil, fs.ErrorDirNotFound
@@ -766,7 +767,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
- return nil, errors.Wrap(err, "Put mkParentDir failed")
+ return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
o := &Object{
fs: f,
@@ -789,7 +790,7 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
c, err := f.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "getInfo")
+ return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
@@ -827,7 +828,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
- return errors.Wrapf(err, "mkdir %q failed", abspath)
+ return fmt.Errorf("mkdir %q failed: %w", abspath, err)
}
parent := path.Dir(abspath)
err = f.mkdir(ctx, parent)
@@ -836,7 +837,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
c, connErr := f.getFtpConnection(ctx)
if connErr != nil {
- return errors.Wrap(connErr, "mkdir")
+ return fmt.Errorf("mkdir: %w", connErr)
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
@@ -872,7 +873,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx)
if err != nil {
- return errors.Wrap(translateErrorFile(err), "Rmdir")
+ return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
@@ -888,11 +889,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "Move mkParentDir failed")
+ return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Move")
+ return nil, fmt.Errorf("Move: %w", err)
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -900,11 +901,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putFtpConnection(&c, err)
if err != nil {
- return nil, errors.Wrap(err, "Move Rename failed")
+ return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "Move NewObject failed")
+ return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -934,19 +935,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
- return errors.Wrapf(err, "DirMove getInfo failed")
+ return fmt.Errorf("DirMove getInfo failed: %w", err)
}
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
- return errors.Wrap(err, "DirMove mkParentDir dst failed")
+ return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getFtpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "DirMove")
+ return fmt.Errorf("DirMove: %w", err)
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
@@ -954,7 +955,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putFtpConnection(&c, err)
if err != nil {
- return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
+ return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -1111,12 +1112,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "open")
+ return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
- return nil, errors.Wrap(err, "open")
+ return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
@@ -1146,7 +1147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "Update")
+ return fmt.Errorf("Update: %w", err)
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
@@ -1164,15 +1165,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// recycle connection in advance to let remove() find free token
o.fs.putFtpConnection(nil, err)
remove()
- return errors.Wrap(err, "update stor")
+ return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
- return errors.Wrap(err, "SetModTime")
+ return fmt.Errorf("SetModTime: %w", err)
}
o.info, err = o.fs.getInfo(ctx, path)
if err != nil {
- return errors.Wrap(err, "update getinfo")
+ return fmt.Errorf("update getinfo: %w", err)
}
return nil
}
@@ -1191,7 +1192,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} else {
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "Remove")
+ return fmt.Errorf("Remove: %w", err)
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)
diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go
index 2e8b5125c..0a4442bde 100644
--- a/backend/googlecloudstorage/googlecloudstorage.go
+++ b/backend/googlecloudstorage/googlecloudstorage.go
@@ -16,6 +16,7 @@ import (
"context"
"encoding/base64"
"encoding/hex"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -25,7 +26,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -375,7 +375,7 @@ func (o *Object) split() (bucket, bucketPath string) {
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
- return nil, errors.Wrap(err, "error processing credentials")
+ return nil, fmt.Errorf("error processing credentials: %w", err)
}
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -408,7 +408,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
- return nil, errors.Wrap(err, "error opening service account credentials file")
+ return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
@@ -417,7 +417,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil {
- return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
+ return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -425,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
+ return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
}
}
@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
- return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
+ return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
if f.rootBucket != "" && f.rootDirectory != "" {
@@ -759,10 +759,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
- return errors.Wrap(err, "failed to get bucket")
+ return fmt.Errorf("failed to get bucket: %w", err)
}
} else {
- return errors.Wrap(err, "failed to get bucket")
+ return fmt.Errorf("failed to get bucket: %w", err)
}
if f.opt.ProjectNumber == "" {
@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
- return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
+ return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go
index 9bd4fd44d..7a07356be 100644
--- a/backend/googlephotos/googlephotos.go
+++ b/backend/googlephotos/googlephotos.go
@@ -6,6 +6,7 @@ package googlephotos
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -17,7 +18,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -85,7 +85,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
- return nil, errors.Wrap(err, "couldn't parse config into struct")
+ return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
}
switch config.State {
@@ -292,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Box")
+ return nil, fmt.Errorf("failed to configure Google Photos: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -345,13 +345,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return "", errors.Wrap(err, "couldn't read openID config")
+ return "", fmt.Errorf("couldn't read openID config: %w", err)
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
- return "", errors.Errorf("couldn't find %q from openID config", name)
+ return "", fmt.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
@@ -374,7 +374,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't read user info")
+ return nil, fmt.Errorf("couldn't read user info: %w", err)
}
return userInfo, nil
}
@@ -405,7 +405,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't revoke token")
+ return fmt.Errorf("couldn't revoke token: %w", err)
}
fs.Infof(f, "res = %+v", res)
return nil
@@ -492,7 +492,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't list albums")
+ return nil, fmt.Errorf("couldn't list albums: %w", err)
}
newAlbums := result.Albums
if shared {
@@ -549,7 +549,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't list files")
+ return fmt.Errorf("couldn't list files: %w", err)
}
items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID {
@@ -693,7 +693,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't create album")
+ return nil, fmt.Errorf("couldn't create album: %w", err)
}
f.albums[false].add(&result)
return &result, nil
@@ -879,7 +879,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't get media item")
+ return fmt.Errorf("couldn't get media item: %w", err)
}
o.setMetaData(&item)
return nil
@@ -1014,7 +1014,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't upload file")
+ return fmt.Errorf("couldn't upload file: %w", err)
}
uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" {
@@ -1042,14 +1042,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to create media item")
+ return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
- return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
+ return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
@@ -1071,7 +1071,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle)
if !ok {
- return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
+ return fmt.Errorf("couldn't find %q in album %q for delete", fileName, albumTitle)
}
opts := rest.Opts{
Method: "POST",
@@ -1087,7 +1087,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't delete item from album")
+ return fmt.Errorf("couldn't delete item from album: %w", err)
}
return nil
}
diff --git a/backend/googlephotos/pattern.go b/backend/googlephotos/pattern.go
index a0e5f695e..e2cbcbf72 100644
--- a/backend/googlephotos/pattern.go
+++ b/backend/googlephotos/pattern.go
@@ -11,7 +11,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
@@ -270,7 +269,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
- return nil, errors.Errorf("bad year %q", match[1])
+ return nil, fmt.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
@@ -284,7 +283,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
- return sf, errors.Errorf("bad year %q", match[1])
+ return sf, fmt.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
@@ -300,14 +299,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
- return sf, errors.Errorf("bad month %q", match[2])
+ return sf, fmt.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
- return sf, errors.Errorf("bad day %q", match[3])
+ return sf, fmt.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
diff --git a/backend/hasher/commands.go b/backend/hasher/commands.go
index 99101daba..536981953 100644
--- a/backend/hasher/commands.go
+++ b/backend/hasher/commands.go
@@ -2,9 +2,10 @@ package hasher
import (
"context"
+ "errors"
+ "fmt"
"path"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -118,18 +119,18 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
case fs.ErrorIsFile:
// ok
case nil:
- return errors.Errorf("not a file: %s", sumRemote)
+ return fmt.Errorf("not a file: %s", sumRemote)
default:
return err
}
sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
if err != nil {
- return errors.Wrap(err, "cannot open sum file")
+ return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := operations.ParseSumFile(ctx, sumObj)
if err != nil {
- return errors.Wrap(err, "failed to parse sum file")
+ return fmt.Errorf("failed to parse sum file: %w", err)
}
if sticky {
diff --git a/backend/hasher/hasher.go b/backend/hasher/hasher.go
index 1a7df38fc..2d94895ee 100644
--- a/backend/hasher/hasher.go
+++ b/backend/hasher/hasher.go
@@ -4,6 +4,7 @@ package hasher
import (
"context"
"encoding/gob"
+ "errors"
"fmt"
"io"
"path"
@@ -11,7 +12,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
@@ -102,7 +102,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
remotePath := fspath.JoinRootPath(opt.Remote, rpath)
baseFs, err := cache.Get(ctx, remotePath)
if err != nil && err != fs.ErrorIsFile {
- return nil, errors.Wrapf(err, "failed to derive base remote %q", opt.Remote)
+ return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
}
f := &Fs{
@@ -127,7 +127,7 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
for _, hashName := range opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
- return nil, errors.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
+ return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
}
if !f.slowHashes.Contains(ht) {
f.autoHashes.Add(ht)
diff --git a/backend/hasher/kv.go b/backend/hasher/kv.go
index 881661b4f..4f6754796 100644
--- a/backend/hasher/kv.go
+++ b/backend/hasher/kv.go
@@ -4,11 +4,11 @@ import (
"bytes"
"context"
"encoding/gob"
+ "errors"
"fmt"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
@@ -199,10 +199,10 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
r.Hashes[hashType] = hashVal
}
if data, err = r.encode(op.key); err != nil {
- return errors.Wrap(err, "marshal failed")
+ return fmt.Errorf("marshal failed: %w", err)
}
if err = b.Put([]byte(op.key), data); err != nil {
- return errors.Wrap(err, "put failed")
+ return fmt.Errorf("put failed: %w", err)
}
return err
}
diff --git a/backend/hasher/object.go b/backend/hasher/object.go
index 3c6e5af78..1233740e6 100644
--- a/backend/hasher/object.go
+++ b/backend/hasher/object.go
@@ -2,13 +2,13 @@ package hasher
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
diff --git a/backend/http/http.go b/backend/http/http.go
index 8b68b0034..47a878bb3 100644
--- a/backend/http/http.go
+++ b/backend/http/http.go
@@ -6,6 +6,8 @@ package http
import (
"context"
+ "errors"
+ "fmt"
"io"
"mime"
"net/http"
@@ -16,7 +18,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -132,7 +133,7 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
- return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
+ return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
}
return nil
}
@@ -377,15 +378,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
- return nil, errors.Wrap(err, "failed to readDir")
+ return nil, fmt.Errorf("failed to readDir: %w", err)
}
if !strings.HasSuffix(URL, "/") {
- return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
+ return nil, fmt.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
if err != nil {
- return nil, errors.Wrap(err, "readDir failed")
+ return nil, fmt.Errorf("readDir failed: %w", err)
}
f.addHeaders(req)
res, err := f.httpClient.Do(req)
@@ -397,7 +398,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
}
err = statusError(res, err)
if err != nil {
- return nil, errors.Wrap(err, "failed to readDir")
+ return nil, fmt.Errorf("failed to readDir: %w", err)
}
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
@@ -405,10 +406,10 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
- return nil, errors.Wrap(err, "readDir")
+ return nil, fmt.Errorf("readDir: %w", err)
}
default:
- return nil, errors.Errorf("Can't parse content type %q", contentType)
+ return nil, fmt.Errorf("Can't parse content type %q", contentType)
}
return names, nil
}
@@ -428,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
names, err := f.readDir(ctx, dir)
if err != nil {
- return nil, errors.Wrapf(err, "error listing %q", dir)
+ return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
var (
entriesMu sync.Mutex // to protect entries
@@ -540,7 +541,7 @@ func (o *Object) stat(ctx context.Context) error {
url := o.url()
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
- return errors.Wrap(err, "stat failed")
+ return fmt.Errorf("stat failed: %w", err)
}
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
@@ -549,7 +550,7 @@ func (o *Object) stat(ctx context.Context) error {
}
err = statusError(res, err)
if err != nil {
- return errors.Wrap(err, "failed to stat")
+ return fmt.Errorf("failed to stat: %w", err)
}
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
@@ -562,7 +563,7 @@ func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
- return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
+ return fmt.Errorf("failed to parse Content-Type: %q: %w", o.contentType, err)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
@@ -588,7 +589,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
url := o.url()
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
if err != nil {
- return nil, errors.Wrap(err, "Open failed")
+ return nil, fmt.Errorf("Open failed: %w", err)
}
// Add optional headers
@@ -601,7 +602,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
- return nil, errors.Wrap(err, "Open failed")
+ return nil, fmt.Errorf("Open failed: %w", err)
}
return res.Body, nil
}
diff --git a/backend/hubic/hubic.go b/backend/hubic/hubic.go
index fcd43ad7f..ccf828ea3 100644
--- a/backend/hubic/hubic.go
+++ b/backend/hubic/hubic.go
@@ -9,6 +9,7 @@ package hubic
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -16,7 +17,6 @@ import (
"time"
swiftLib "github.com/ncw/swift/v2"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -120,7 +120,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
- return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
+ return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
@@ -146,7 +146,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Hubic")
+ return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = c.Authenticate(ctx)
if err != nil {
- return nil, errors.Wrap(err, "error authenticating swift connection")
+ return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
diff --git a/backend/jottacloud/api/types.go b/backend/jottacloud/api/types.go
index 83dbf01a8..db1e22aa6 100644
--- a/backend/jottacloud/api/types.go
+++ b/backend/jottacloud/api/types.go
@@ -2,10 +2,9 @@ package api
import (
"encoding/xml"
+ "errors"
"fmt"
"time"
-
- "github.com/pkg/errors"
)
const (
diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go
index 64898d9f6..6fa0329e9 100644
--- a/backend/jottacloud/jottacloud.go
+++ b/backend/jottacloud/jottacloud.go
@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -19,7 +20,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/jottacloud/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -146,12 +146,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
- return nil, errors.Wrap(err, "failed to get oauth token")
+ return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
m.Set(configTokenURL, tokenEndpoint)
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
- return nil, errors.Wrap(err, "error while saving token")
+ return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
@@ -168,7 +168,7 @@ machines.`)
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
- return nil, errors.Wrap(err, "failed to register device")
+ return nil, fmt.Errorf("failed to register device: %w", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
@@ -216,11 +216,11 @@ machines.`)
m.Set("password", "")
m.Set("auth_code", "")
if err != nil {
- return nil, errors.Wrap(err, "failed to get oauth token")
+ return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
- return nil, errors.Wrap(err, "error while saving token")
+ return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
@@ -529,7 +529,7 @@ func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.Custom
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
- return nil, errors.Wrap(err, "couldn't get customer info")
+ return nil, fmt.Errorf("couldn't get customer info: %w", err)
}
return info, nil
@@ -544,7 +544,7 @@ func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
- return nil, errors.Wrap(err, "couldn't get drive info")
+ return nil, fmt.Errorf("couldn't get drive info: %w", err)
}
return info, nil
@@ -559,7 +559,7 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
- return nil, errors.Wrap(err, "couldn't get device info")
+ return nil, fmt.Errorf("couldn't get device info: %w", err)
}
return info, nil
@@ -597,7 +597,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
}
if err != nil {
- return nil, errors.Wrap(err, "read metadata failed")
+ return nil, fmt.Errorf("read metadata failed: %w", err)
}
if result.XMLName.Local == "folder" {
return nil, fs.ErrorIsDir
@@ -720,7 +720,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
// Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
- return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
+ return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
}
return oAuthClient, ts, nil
}
@@ -786,7 +786,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(context.TODO(), remote)
if err != nil {
- if uErr := errors.Cause(err); uErr == fs.ErrorObjectNotFound || uErr == fs.ErrorNotAFile || uErr == fs.ErrorIsDir {
+ if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) || errors.Is(err, fs.ErrorIsDir) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -881,7 +881,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, fs.ErrorDirNotFound
}
}
- return nil, errors.Wrap(err, "couldn't list files")
+ return nil, fmt.Errorf("couldn't list files: %w", err)
}
if !f.validFolder(&result) {
@@ -981,7 +981,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
}
- return errors.Wrap(err, "couldn't list files")
+ return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
@@ -1081,7 +1081,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't purge directory")
+ return fmt.Errorf("couldn't purge directory: %w", err)
}
return nil
@@ -1148,7 +1148,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return f.newObjectWithInfo(ctx, remote, info)
@@ -1178,7 +1178,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
if err != nil {
- return nil, errors.Wrap(err, "couldn't move file")
+ return nil, fmt.Errorf("couldn't move file: %w", err)
}
return f.newObjectWithInfo(ctx, remote, info)
@@ -1222,7 +1222,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil {
- return errors.Wrap(err, "couldn't move directory")
+ return fmt.Errorf("couldn't move directory: %w", err)
}
return nil
}
@@ -1256,13 +1256,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if err != nil {
if unlink {
- return "", errors.Wrap(err, "couldn't remove public link")
+ return "", fmt.Errorf("couldn't remove public link: %w", err)
}
- return "", errors.Wrap(err, "couldn't create public link")
+ return "", fmt.Errorf("couldn't create public link: %w", err)
}
if unlink {
if result.PublicURI != "" {
- return "", errors.Errorf("couldn't remove public link - %q", result.PublicURI)
+ return "", fmt.Errorf("couldn't remove public link - %q", result.PublicURI)
}
return "", nil
}
@@ -1322,7 +1322,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
- return errors.Wrap(err, "couldn't empty trash")
+ return fmt.Errorf("couldn't empty trash: %w", err)
}
return nil
@@ -1584,7 +1584,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
- return errors.Wrap(err, "failed to remove old object")
+ return fmt.Errorf("failed to remove old object: %w", err)
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
@@ -1605,7 +1605,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
defer cleanup()
if err != nil {
- return errors.Wrap(err, "failed to calculate MD5")
+ return fmt.Errorf("failed to calculate MD5: %w", err)
}
// Wrap the accounting back onto the stream
in = wrap(in)
diff --git a/backend/local/about_unix.go b/backend/local/about_unix.go
index 427c62872..86f28d74f 100644
--- a/backend/local/about_unix.go
+++ b/backend/local/about_unix.go
@@ -5,10 +5,10 @@ package local
import (
"context"
+ "fmt"
"os"
"syscall"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -20,7 +20,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
- return nil, errors.Wrap(err, "failed to read disk usage")
+ return nil, fmt.Errorf("failed to read disk usage: %w", err)
}
bs := int64(s.Bsize) // nolint: unconvert
usage := &fs.Usage{
diff --git a/backend/local/about_windows.go b/backend/local/about_windows.go
index cc8332534..d8daed764 100644
--- a/backend/local/about_windows.go
+++ b/backend/local/about_windows.go
@@ -5,10 +5,10 @@ package local
import (
"context"
+ "fmt"
"syscall"
"unsafe"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -24,7 +24,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
- return nil, errors.Wrap(e1, "failed to read disk usage")
+ return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
diff --git a/backend/local/local.go b/backend/local/local.go
index 90f3e9a3f..5dc65ec6a 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -4,6 +4,7 @@ package local
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -16,7 +17,6 @@ import (
"time"
"unicode/utf8"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -432,7 +432,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fd, err := os.Open(fsDirPath)
if err != nil {
isPerm := os.IsPermission(err)
- err = errors.Wrapf(err, "failed to open directory %q", dir)
+ err = fmt.Errorf("failed to open directory %q: %w", dir, err)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
@@ -443,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
- err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
+ err = fmt.Errorf("failed to close directory %q: %w", dir, cerr)
}
}()
@@ -473,7 +473,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
- err = errors.Wrapf(err, "failed to read directory %q", namepath)
+ if err != nil {
+ 	err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
+ }
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -483,7 +483,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
}
if err != nil {
- return nil, errors.Wrap(err, "failed to read directory entry")
+ return nil, fmt.Errorf("failed to read directory entry: %w", err)
}
for _, fi := range fis {
@@ -496,7 +496,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fi, err = os.Stat(localPath)
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
- err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
+ err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
continue
@@ -672,7 +672,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return err
}
if !fi.Mode().IsDir() {
- return errors.Errorf("can't purge non directory: %q", dir)
+ return fmt.Errorf("can't purge non directory: %q", dir)
}
return os.RemoveAll(dir)
}
@@ -866,12 +866,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
err := o.lstat()
var changed bool
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
- return "", errors.Wrap(err, "hash: failed to stat")
+ return "", fmt.Errorf("hash: failed to stat: %w", err)
}
} else {
o.fs.objectMetaMu.RLock()
@@ -900,16 +900,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
- return "", errors.Wrap(err, "hash: failed to open")
+ return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
- return "", errors.Wrap(err, "hash: failed to read")
+ return "", fmt.Errorf("hash: failed to read: %w", err)
}
if closeErr != nil {
- return "", errors.Wrap(closeErr, "hash: failed to close")
+ return "", fmt.Errorf("hash: failed to close: %w", closeErr)
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()
@@ -990,17 +990,17 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
- return 0, errors.Wrap(err, "can't read status of source file while transferring")
+ return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
- return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
+ return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
}
if !oldtime.Equal(fi.ModTime()) {
- return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
+ return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
}
}
diff --git a/backend/mailru/api/helpers.go b/backend/mailru/api/helpers.go
index 2b4ebb3b5..1b43eb182 100644
--- a/backend/mailru/api/helpers.go
+++ b/backend/mailru/api/helpers.go
@@ -6,11 +6,11 @@ import (
"bufio"
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)
diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go
index 9aa969085..4551eeae7 100644
--- a/backend/mailru/mailru.go
+++ b/backend/mailru/mailru.go
@@ -3,6 +3,7 @@ package mailru
import (
"bytes"
"context"
+ "errors"
"fmt"
gohash "hash"
"io"
@@ -40,7 +41,6 @@ import (
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
- "github.com/pkg/errors"
"golang.org/x/oauth2"
)
@@ -438,7 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
err = errors.New("Invalid token")
}
if err != nil {
- return errors.Wrap(err, "Failed to authorize")
+ return fmt.Errorf("Failed to authorize: %w", err)
}
if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {
@@ -507,7 +507,7 @@ func (f *Fs) reAuthorize(opts *rest.Opts, origErr error) error {
func (f *Fs) accessToken() (string, error) {
token, err := f.source.Token()
if err != nil {
- return "", errors.Wrap(err, "cannot refresh access token")
+ return "", fmt.Errorf("cannot refresh access token: %w", err)
}
return token.AccessToken, nil
}
@@ -1196,7 +1196,7 @@ func (f *Fs) purgeWithCheck(ctx context.Context, dir string, check bool, opName
_, dirSize, err := f.readItemMetaData(ctx, path)
if err != nil {
- return errors.Wrapf(err, "%s failed", opName)
+ return fmt.Errorf("%s failed: %w", opName, err)
}
if check && dirSize > 0 {
return fs.ErrorDirectoryNotEmpty
@@ -1300,7 +1300,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
if response.Status != 200 {
return nil, fmt.Errorf("copy failed with code %d", response.Status)
@@ -1684,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
if err != nil {
- return errors.Wrap(err, "Failed to create spool file")
+ return fmt.Errorf("Failed to create spool file: %w", err)
}
if o.putByHash(ctx, mrHash, src, "spool") {
// If put by hash is successful, ignore transitive error
@@ -2318,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro
})
if err != nil || url == "" {
closeBody(res)
- return "", errors.Wrap(err, "Failed to request file server")
+ return "", fmt.Errorf("Failed to request file server: %w", err)
}
p.addServer(url, now)
diff --git a/backend/mega/mega.go b/backend/mega/mega.go
index 5615cd640..7b38fdf58 100644
--- a/backend/mega/mega.go
+++ b/backend/mega/mega.go
@@ -17,6 +17,7 @@ Improvements:
import (
"context"
+ "errors"
"fmt"
"io"
"path"
@@ -24,7 +25,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -165,13 +165,6 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// Let the mega library handle the low level retries
return false, err
- /*
- switch errors.Cause(err) {
- case mega.EAGAIN, mega.ERATELIMIT, mega.ETEMPUNAVAIL:
- return true, err
- }
- return fserrors.ShouldRetry(err), err
- */
}
// readMetaDataForPath reads the metadata from the path
@@ -195,7 +188,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
- return nil, errors.Wrap(err, "couldn't decrypt password")
+ return nil, fmt.Errorf("couldn't decrypt password: %w", err)
}
}
ci := fs.GetConfig(ctx)
@@ -222,7 +215,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err := srv.Login(opt.User, opt.Pass)
if err != nil {
- return nil, errors.Wrap(err, "couldn't login")
+ return nil, fmt.Errorf("couldn't login: %w", err)
}
megaCache[opt.User] = srv
}
@@ -350,11 +343,11 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
break
}
if err != mega.ENOENT {
- return nil, errors.Wrap(err, "mkdir lookup failed")
+ return nil, fmt.Errorf("mkdir lookup failed: %w", err)
}
}
if err != nil {
- return nil, errors.Wrap(err, "internal error: mkdir called with non-existent root node")
+ return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -365,7 +358,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "mkdir create node failed")
+ return nil, fmt.Errorf("mkdir create node failed: %w", err)
}
}
return node, nil
@@ -428,7 +421,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
if err != nil {
- return errors.Wrap(err, "CleanUp failed to list items in trash")
+ return fmt.Errorf("CleanUp failed to list items in trash: %w", err)
}
fs.Infof(f, "Deleting %d items from the trash", len(items))
errors := 0
@@ -489,7 +482,7 @@ type listFn func(*mega.Node) bool
func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
nodes, err := f.srv.FS.GetChildren(dir)
if err != nil {
- return false, errors.Wrapf(err, "list failed")
+ return false, fmt.Errorf("list failed: %w", err)
}
for _, item := range nodes {
if fn(item) {
@@ -609,7 +602,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
_, err = f.mkdir(ctx, rootNode, dir)
- return errors.Wrap(err, "Mkdir failed")
+ if err != nil {
+ 	return fmt.Errorf("Mkdir failed: %w", err)
+ }
+ return nil
}
// deleteNode removes a file or directory, observing useTrash
@@ -639,7 +632,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
children, err := f.srv.FS.GetChildren(dirNode)
if err != nil {
- return errors.Wrap(err, "purgeCheck GetChildren failed")
+ return fmt.Errorf("purgeCheck GetChildren failed: %w", err)
}
if len(children) > 0 {
return fs.ErrorDirectoryNotEmpty
@@ -650,7 +643,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
err = f.deleteNode(ctx, dirNode)
if err != nil {
- return errors.Wrap(err, "delete directory node failed")
+ return fmt.Errorf("delete directory node failed: %w", err)
}
// Remove the root node if we just deleted it
@@ -704,7 +697,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
}
if err != nil {
- return errors.Wrap(err, "server-side move failed to make dst parent dir")
+ return fmt.Errorf("server-side move failed to make dst parent dir: %w", err)
}
if srcRemote != "" {
@@ -717,7 +710,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
srcDirNode, err = f.findDir(absRoot, srcParent)
}
if err != nil {
- return errors.Wrap(err, "server-side move failed to lookup src parent dir")
+ return fmt.Errorf("server-side move failed to lookup src parent dir: %w", err)
}
// move the object into its new directory if required
@@ -728,7 +721,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "server-side move failed")
+ return fmt.Errorf("server-side move failed: %w", err)
}
}
@@ -742,7 +735,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "server-side rename failed")
+ return fmt.Errorf("server-side rename failed: %w", err)
}
}
@@ -812,7 +805,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
- return errors.Wrap(err, "DirMove error while checking dest directory")
+ return fmt.Errorf("DirMove error while checking dest directory: %w", err)
}
// Do the move
@@ -844,15 +837,15 @@ func (f *Fs) Hashes() hash.Set {
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
root, err := f.findRoot(ctx, false)
if err != nil {
- return "", errors.Wrap(err, "PublicLink failed to find root node")
+ return "", fmt.Errorf("PublicLink failed to find root node: %w", err)
}
node, err := f.findNode(root, remote)
if err != nil {
- return "", errors.Wrap(err, "PublicLink failed to find path")
+ return "", fmt.Errorf("PublicLink failed to find path: %w", err)
}
link, err = f.srv.Link(node, true)
if err != nil {
- return "", errors.Wrap(err, "PublicLink failed to create link")
+ return "", fmt.Errorf("PublicLink failed to create link: %w", err)
}
return link, nil
}
@@ -867,13 +860,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
dstDir := dirs[0]
dstDirNode := f.srv.FS.HashLookup(dstDir.ID())
if dstDirNode == nil {
- return errors.Errorf("MergeDirs failed to find node for: %v", dstDir)
+ return fmt.Errorf("MergeDirs failed to find node for: %v", dstDir)
}
for _, srcDir := range dirs[1:] {
// find src directory
srcDirNode := f.srv.FS.HashLookup(srcDir.ID())
if srcDirNode == nil {
- return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
+ return fmt.Errorf("MergeDirs failed to find node for: %v", srcDir)
}
// list the objects
@@ -883,7 +876,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return false
})
if err != nil {
- return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
+ return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
}
// move them into place
for _, info := range infos {
@@ -893,14 +886,14 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
+ return fmt.Errorf("MergeDirs move failed on %q in %v: %w", f.opt.Enc.ToStandardName(info.GetName()), srcDir, err)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(ctx, srcDirNode)
if err != nil {
- return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
+ return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
}
}
return nil
@@ -915,7 +908,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get Mega Quota")
+ return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
@@ -1076,7 +1069,7 @@ func (oo *openObject) Close() (err error) {
return shouldRetry(oo.ctx, err)
})
if err != nil {
- return errors.Wrap(err, "failed to finish download")
+ return fmt.Errorf("failed to finish download: %w", err)
}
oo.closed = true
return nil
@@ -1104,7 +1097,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "open download file failed")
+ return nil, fmt.Errorf("open download file failed: %w", err)
}
oo := &openObject{
@@ -1133,7 +1126,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Create the parent directory
dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
if err != nil {
- return errors.Wrap(err, "update make parent dir failed")
+ return fmt.Errorf("update make parent dir failed: %w", err)
}
var u *mega.Upload
@@ -1142,7 +1135,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "upload file failed to create session")
+ return fmt.Errorf("upload file failed to create session: %w", err)
}
// Upload the chunks
@@ -1150,12 +1143,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
for id := 0; id < u.Chunks(); id++ {
_, chunkSize, err := u.ChunkLocation(id)
if err != nil {
- return errors.Wrap(err, "upload failed to read chunk location")
+ return fmt.Errorf("upload failed to read chunk location: %w", err)
}
chunk := make([]byte, chunkSize)
_, err = io.ReadFull(in, chunk)
if err != nil {
- return errors.Wrap(err, "upload failed to read data")
+ return fmt.Errorf("upload failed to read data: %w", err)
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1163,7 +1156,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "upload file failed to upload chunk")
+ return fmt.Errorf("upload file failed to upload chunk: %w", err)
}
}
@@ -1174,14 +1167,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "failed to finish upload")
+ return fmt.Errorf("failed to finish upload: %w", err)
}
// If the upload succeeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(ctx, o.info)
if err != nil {
- return errors.Wrap(err, "upload failed to remove old version")
+ return fmt.Errorf("upload failed to remove old version: %w", err)
}
o.info = nil
}
@@ -1193,7 +1186,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
err := o.fs.deleteNode(ctx, o.info)
if err != nil {
- return errors.Wrap(err, "Remove object failed")
+ return fmt.Errorf("Remove object failed: %w", err)
}
return nil
}
diff --git a/backend/memory/memory.go b/backend/memory/memory.go
index 0ff851d45..9675abcd3 100644
--- a/backend/memory/memory.go
+++ b/backend/memory/memory.go
@@ -14,7 +14,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -586,7 +585,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
if err != nil {
- return errors.Wrap(err, "failed to update memory object")
+ return fmt.Errorf("failed to update memory object: %w", err)
}
o.od = &objectData{
data: data,
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
old mode 100755
new mode 100644
index 7e0b0a823..d76e17a35
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -18,7 +19,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
"github.com/rclone/rclone/fs"
@@ -385,7 +385,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure OneDrive")
+ return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
srv := rest.NewClient(oAuthClient)
@@ -754,10 +754,10 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs%chunkSizeMultiple != 0 {
- return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
+ return fmt.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
}
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -781,7 +781,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "onedrive: chunk size")
+ return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
@@ -797,7 +797,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure OneDrive")
+ return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -828,7 +828,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Get rootID
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil || rootInfo.GetID() == "" {
- return nil, errors.Wrap(err, "failed to get root")
+ return nil, fmt.Errorf("failed to get root: %w", err)
}
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
@@ -971,7 +971,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
if len(result.Value) == 0 {
break
@@ -1175,7 +1175,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
var status api.AsyncOperationStatus
err = json.Unmarshal(body, &status)
if err != nil {
- return errors.Wrapf(err, "async status result not JSON: %q", body)
+ return fmt.Errorf("async status result not JSON: %q: %w", body, err)
}
switch status.Status {
@@ -1185,15 +1185,18 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
}
fallthrough
case "deleteFailed":
- return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
+ return fmt.Errorf("%s: async operation returned %q", o.remote, status.Status)
case "completed":
err = o.readMetaData(ctx)
- return errors.Wrapf(err, "async operation completed but readMetaData failed")
+ if err != nil {
+ 	return fmt.Errorf("async operation completed but readMetaData failed: %w", err)
+ }
+ return nil
}
time.Sleep(1 * time.Second)
}
- return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
+ return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}
// Copy src to this remote using server-side copy operations.
@@ -1232,7 +1232,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
- return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+ return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
}
@@ -1450,7 +1450,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "about failed")
+ return nil, fmt.Errorf("about failed: %w", err)
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
@@ -1501,7 +1501,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
})
if err != nil {
if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
- return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
+ return "", fmt.Errorf("%v (is making public links permitted by the org admin?)", err)
}
return "", err
}
@@ -1886,17 +1886,17 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
return 0, err
}
if len(info.NextExpectedRanges) != 1 {
- return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
+ return 0, fmt.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
}
position := info.NextExpectedRanges[0]
i := strings.IndexByte(position, '-')
if i < 0 {
- return 0, errors.Errorf("no '-' in next expected range: %q", position)
+ return 0, fmt.Errorf("no '-' in next expected range: %q", position)
}
position = position[:i]
pos, err = strconv.ParseInt(position, 10, 64)
if err != nil {
- return 0, errors.Wrapf(err, "bad expected range: %q", position)
+ return 0, fmt.Errorf("bad expected range: %q: %w", position, err)
}
return pos, nil
}
@@ -1930,14 +1930,14 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
switch {
case skip < 0:
- return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
+ return false, fmt.Errorf("sent block already (skip %d < 0), can't rewind: %w", skip, err)
case skip > chunkSize:
- return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
+ return false, fmt.Errorf("position is in the future (skip %d > chunkSize %d), can't skip forward: %w", skip, chunkSize, err)
case skip == chunkSize:
fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
return false, nil
}
- return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
+ return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
}
if err != nil {
return shouldRetry(ctx, resp, err)
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 00d83357e..96f29aa33 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -2,6 +2,7 @@ package opendrive
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -11,7 +12,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -210,7 +210,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to create session")
+ return nil, fmt.Errorf("failed to create session: %w", err)
}
fs.Debugf(nil, "Starting OpenDrive session with ID: %s", f.session.SessionID)
@@ -362,7 +362,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
- return nil, errors.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+ return nil, fmt.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -636,7 +636,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to create file")
+ return nil, fmt.Errorf("failed to create file: %w", err)
}
o.id = response.FileID
@@ -719,7 +719,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return "", false, errors.Wrap(err, "failed to get folder list")
+ return "", false, fmt.Errorf("failed to get folder list: %w", err)
}
leaf = f.opt.Enc.FromStandardName(leaf)
@@ -762,7 +762,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get folder list")
+ return nil, fmt.Errorf("failed to get folder list: %w", err)
}
for _, folder := range folderList.Folders {
@@ -871,7 +871,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to open file)")
+ return nil, fmt.Errorf("failed to open file: %w", err)
}
return resp.Body, nil
@@ -919,7 +919,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to create file")
+ return fmt.Errorf("failed to create file: %w", err)
}
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
@@ -963,10 +963,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to create file")
+ return fmt.Errorf("failed to create file: %w", err)
}
if reply.TotalWritten != currentChunkSize {
- return errors.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
+ return fmt.Errorf("failed to create file: incomplete write of %d/%d bytes", reply.TotalWritten, currentChunkSize)
}
chunkCounter++
@@ -986,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to create file")
+ return fmt.Errorf("failed to create file: %w", err)
}
// fs.Debugf(nil, "PostClose: %#v", closeResponse)
@@ -1038,7 +1038,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to get folder list")
+ return fmt.Errorf("failed to get folder list: %w", err)
}
if len(folderList.Files) == 0 {
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index f0ff23797..bb4c48653 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -10,6 +10,7 @@ package pcloud
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -18,7 +19,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/pcloud/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -290,7 +290,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Pcloud")
+ return nil, fmt.Errorf("failed to configure Pcloud: %w", err)
}
updateTokenURL(oauthConfig, opt.Hostname)
@@ -463,7 +463,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Metadata.Contents {
item := &result.Metadata.Contents[i]
@@ -600,7 +600,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -872,7 +872,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "about failed")
+ return nil, fmt.Errorf("about failed: %w", err)
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
@@ -952,7 +952,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.md5 == "" && o.sha1 == "" && o.sha256 == "" {
err := o.getHashes(ctx)
if err != nil {
- return "", errors.Wrap(err, "failed to get hash")
+ return "", fmt.Errorf("failed to get hash: %w", err)
}
}
return *pHash, nil
@@ -971,7 +971,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.IsFolder {
- return errors.Wrapf(fs.ErrorNotAFile, "%q is a folder", o.remote)
+ return fmt.Errorf("%q is a folder: %w", o.remote, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -1058,7 +1058,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
return "", err
}
if !result.IsValid() {
- return "", errors.Errorf("fetched invalid link %+v", result)
+ return "", fmt.Errorf("fetched invalid link %+v", result)
}
o.link = &result
return o.link.URL(), nil
@@ -1146,7 +1146,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
if err != nil {
- return errors.Wrap(err, "failed to make multipart upload for 0 length file")
+ return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
}
contentLength := overhead + size
@@ -1177,7 +1177,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
if len(result.Items) != 1 {
- return errors.Errorf("failed to upload %v - not sure why", o)
+ return fmt.Errorf("failed to upload %v - not sure why", o)
}
o.setHashes(&result.Checksums[0])
return o.setMetaData(&result.Items[0])
diff --git a/backend/premiumizeme/premiumizeme.go b/backend/premiumizeme/premiumizeme.go
index 9f152a3f8..58e2898e1 100644
--- a/backend/premiumizeme/premiumizeme.go
+++ b/backend/premiumizeme/premiumizeme.go
@@ -18,6 +18,7 @@ canStream = false
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net"
@@ -27,7 +28,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/premiumizeme/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -250,7 +250,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.APIKey == "" {
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure premiumize.me")
+ return nil, fmt.Errorf("failed to configure premiumize.me: %w", err)
}
} else {
client = fshttp.NewClient(ctx)
@@ -380,10 +380,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
- return "", errors.Wrap(err, "CreateDir http")
+ return "", fmt.Errorf("CreateDir http: %w", err)
}
if err = info.AsErr(); err != nil {
- return "", errors.Wrap(err, "CreateDir")
+ return "", fmt.Errorf("CreateDir: %w", err)
}
// fmt.Printf("...Id %q\n", *info.Id)
return info.ID, nil
@@ -420,10 +420,10 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return newDirID, found, errors.Wrap(err, "couldn't list files")
+ return newDirID, found, fmt.Errorf("couldn't list files: %w", err)
}
if err = result.AsErr(); err != nil {
- return newDirID, found, errors.Wrap(err, "error while listing")
+ return newDirID, found, fmt.Errorf("error while listing: %w", err)
}
newDirID = result.FolderID
for i := range result.Content {
@@ -572,7 +572,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return true
})
if err != nil {
- return errors.Wrap(err, "purgeCheck")
+ return fmt.Errorf("purgeCheck: %w", err)
}
if found {
return fs.ErrorDirectoryNotEmpty
@@ -594,10 +594,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
if err = result.AsErr(); err != nil {
- return errors.Wrap(err, "rmdir")
+ return fmt.Errorf("rmdir: %w", err)
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -645,7 +645,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
tmpLeaf := newLeaf + "." + random.String(8)
err = f.renameLeaf(ctx, isFile, id, tmpLeaf)
if err != nil {
- return errors.Wrap(err, "Move rename leaf")
+ return fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -674,10 +674,10 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "Move http")
+ return fmt.Errorf("Move http: %w", err)
}
if err = result.AsErr(); err != nil {
- return errors.Wrap(err, "Move")
+ return fmt.Errorf("Move: %w", err)
}
}
@@ -685,7 +685,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doRenameLeaf {
err = f.renameLeaf(ctx, isFile, id, newLeaf)
if err != nil {
- return errors.Wrap(err, "Move rename leaf")
+ return fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "CreateDir http")
+ return nil, fmt.Errorf("CreateDir http: %w", err)
}
if err = info.AsErr(); err != nil {
- return nil, errors.Wrap(err, "CreateDir")
+ return nil, fmt.Errorf("CreateDir: %w", err)
}
usage = &fs.Usage{
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
@@ -843,7 +843,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != "file" {
- return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+ return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -953,19 +953,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *url.URL
u, err = url.Parse(info.URL)
if err != nil {
- return true, errors.Wrap(err, "failed to parse download URL")
+ return true, fmt.Errorf("failed to parse download URL: %w", err)
}
_, err = net.LookupIP(u.Hostname())
if err != nil {
- return true, errors.Wrap(err, "failed to resolve download URL")
+ return true, fmt.Errorf("failed to resolve download URL: %w", err)
}
return false, nil
})
if err != nil {
- return errors.Wrap(err, "upload get URL http")
+ return fmt.Errorf("upload get URL http: %w", err)
}
if err = info.AsErr(); err != nil {
- return errors.Wrap(err, "upload get URL")
+ return fmt.Errorf("upload get URL: %w", err)
}
// if file exists then rename it out the way otherwise uploads can fail
@@ -976,7 +976,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Moving old file out the way to %q", newLeaf)
err = o.fs.renameLeaf(ctx, true, oldID, newLeaf)
if err != nil {
- return errors.Wrap(err, "upload rename old file")
+ return fmt.Errorf("upload rename old file: %w", err)
}
defer func() {
// on failed upload rename old file back
@@ -984,7 +984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Renaming old file back (from %q to %q) since upload failed", leaf, newLeaf)
newErr := o.fs.renameLeaf(ctx, true, oldID, leaf)
if newErr != nil && err == nil {
- err = errors.Wrap(newErr, "upload renaming old file back")
+ err = fmt.Errorf("upload renaming old file back: %w", newErr)
}
}
}()
@@ -1007,10 +1007,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "upload file http")
+ return fmt.Errorf("upload file http: %w", err)
}
if err = result.AsErr(); err != nil {
- return errors.Wrap(err, "upload file")
+ return fmt.Errorf("upload file: %w", err)
}
// on successful upload, remove old file if it exists
@@ -1019,7 +1019,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removing old file")
err := o.fs.remove(ctx, oldID)
if err != nil {
- return errors.Wrap(err, "upload remove old file")
+ return fmt.Errorf("upload remove old file: %w", err)
}
}
@@ -1049,10 +1049,10 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "rename http")
+ return fmt.Errorf("rename http: %w", err)
}
if err = result.AsErr(); err != nil {
- return errors.Wrap(err, "rename")
+ return fmt.Errorf("rename: %w", err)
}
return nil
}
@@ -1074,10 +1074,10 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "remove http")
+ return fmt.Errorf("remove http: %w", err)
}
if err = result.AsErr(); err != nil {
- return errors.Wrap(err, "remove")
+ return fmt.Errorf("remove: %w", err)
}
return nil
}
@@ -1086,7 +1086,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
func (o *Object) Remove(ctx context.Context) error {
err := o.readMetaData(ctx)
if err != nil {
- return errors.Wrap(err, "Remove: Failed to read metadata")
+ return fmt.Errorf("Remove: Failed to read metadata: %w", err)
}
return o.fs.remove(ctx, o.id)
}
diff --git a/backend/putio/fs.go b/backend/putio/fs.go
index fc8e352ac..073f86a44 100644
--- a/backend/putio/fs.go
+++ b/backend/putio/fs.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/base64"
+ "errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -80,7 +80,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
httpClient := fshttp.NewClient(ctx)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure putio")
+ return nil, fmt.Errorf("failed to configure putio: %w", err)
}
p := &Fs{
name: name,
@@ -469,7 +469,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// check directory exists
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
dirID := atoi(directoryID)
@@ -482,7 +482,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
if len(children) != 0 {
return errors.New("directory not empty")
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "about failed")
+ return nil, fmt.Errorf("about failed: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used
diff --git a/backend/putio/object.go b/backend/putio/object.go
index acc988c58..0383a355e 100644
--- a/backend/putio/object.go
+++ b/backend/putio/object.go
@@ -2,6 +2,7 @@ package putio
import (
"context"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -9,7 +10,6 @@ import (
"strconv"
"time"
- "github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -82,7 +82,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readEntryAndSetMetadata(ctx)
if err != nil {
- return "", errors.Wrap(err, "failed to read hash from metadata")
+ return "", fmt.Errorf("failed to read hash from metadata: %w", err)
}
return o.file.CRC32, nil
}
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index da9581edc..d4053e250 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -8,6 +8,7 @@ package qingstor
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -17,7 +18,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -285,7 +285,7 @@ func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error)
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -300,7 +300,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
- return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+ return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
@@ -329,11 +329,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "qingstor: chunk size")
+ return nil, fmt.Errorf("qingstor: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
- return nil, errors.Wrap(err, "qingstor: upload cutoff")
+ return nil, fmt.Errorf("qingstor: upload cutoff: %w", err)
}
svc, err := qsServiceConnection(ctx, opt)
if err != nil {
@@ -884,7 +884,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
- return errors.Wrap(err, "clean up bucket list multipart uploads")
+ return fmt.Errorf("clean up bucket list multipart uploads: %w", err)
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
@@ -896,7 +896,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
- err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
+ err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr)
fs.Errorf(f, "%v", err)
}
} else {
diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go
index 200ef3554..d9a818019 100644
--- a/backend/qingstor/upload.go
+++ b/backend/qingstor/upload.go
@@ -8,13 +8,13 @@ package qingstor
import (
"bytes"
"crypto/md5"
+ "errors"
"fmt"
"hash"
"io"
"sort"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
@@ -175,7 +175,7 @@ func (u *uploader) upload() error {
u.init()
if u.cfg.partSize < minMultiPartSize {
- return errors.Errorf("part size must be at least %d bytes", minMultiPartSize)
+ return fmt.Errorf("part size must be at least %d bytes", minMultiPartSize)
}
// Do one read to determine if we have more than one part
@@ -184,7 +184,7 @@ func (u *uploader) upload() error {
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
} else if err != nil {
- return errors.Errorf("read upload data failed: %s", err)
+ return fmt.Errorf("read upload data failed: %s", err)
}
fs.Debugf(u, "Uploading as multi-part object to QingStor")
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 5fa7400f1..e01f6f2c4 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -9,6 +9,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/xml"
+ "errors"
"fmt"
"io"
"net/http"
@@ -33,7 +34,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ncw/swift/v2"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -1557,7 +1557,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
// start a new AWS session
awsSession, err := session.NewSession()
if err != nil {
- return nil, nil, errors.Wrap(err, "NewSession")
+ return nil, nil, fmt.Errorf("NewSession: %w", err)
}
// first provider to supply a credential set "wins"
@@ -1661,7 +1661,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -1676,7 +1676,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
- return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
+ return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
@@ -1789,11 +1789,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "s3: chunk size")
+ return nil, fmt.Errorf("s3: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
- return nil, errors.Wrap(err, "s3: upload cutoff")
+ return nil, fmt.Errorf("s3: upload cutoff: %w", err)
}
if opt.ACL == "" {
opt.ACL = "private"
@@ -1931,13 +1931,13 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
region, err := f.getBucketLocation(ctx, bucket)
if err != nil {
- return errors.Wrap(err, "reading bucket location failed")
+ return fmt.Errorf("reading bucket location failed: %w", err)
}
if aws.StringValue(f.c.Config.Endpoint) != "" {
- return errors.Errorf("can't set region to %q as endpoint is set", region)
+ return fmt.Errorf("can't set region to %q as endpoint is set", region)
}
if aws.StringValue(f.c.Config.Region) == region {
- return errors.Errorf("region is already %q - not updating", region)
+ return fmt.Errorf("region is already %q - not updating", region)
}
// Make a new session with the new region
@@ -1945,7 +1945,7 @@ func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
f.opt.Region = region
c, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
if err != nil {
- return errors.Wrap(err, "creating new session failed")
+ return fmt.Errorf("creating new session failed: %w", err)
}
f.c = c
f.ses = ses
@@ -2141,7 +2141,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if startAfter != nil && urlEncodeListings {
*startAfter, err = url.QueryUnescape(*startAfter)
if err != nil {
- return errors.Wrapf(err, "failed to URL decode StartAfter/NextMarker %q", *continuationToken)
+ return fmt.Errorf("failed to URL decode StartAfter/NextMarker %q: %w", *continuationToken, err)
}
}
}
@@ -2727,7 +2727,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if lifetime := opt["lifetime"]; lifetime != "" {
ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
if err != nil {
- return nil, errors.Wrap(err, "bad lifetime")
+ return nil, fmt.Errorf("bad lifetime: %w", err)
}
req.RestoreRequest.Days = &ilifetime
}
@@ -2786,7 +2786,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
- return nil, errors.Wrap(err, "bad max-age")
+ return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
@@ -2820,7 +2820,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
return f.shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
+ return nil, fmt.Errorf("list multipart uploads bucket %q key %q: %w", bucket, key, err)
}
uploads = append(uploads, resp.Uploads...)
if !aws.BoolValue(resp.IsTruncated) {
@@ -2878,7 +2878,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
}
_, abortErr := f.c.AbortMultipartUpload(&req)
if abortErr != nil {
- err = errors.Wrapf(abortErr, "failed to remove %s", what)
+ err = fmt.Errorf("failed to remove %s: %w", what, abortErr)
fs.Errorf(f, "%v", err)
}
} else {
@@ -3218,7 +3218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
- return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
+ return nil, fmt.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
}
}
if err != nil {
@@ -3296,7 +3296,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "multipart upload failed to initialise")
+ return fmt.Errorf("multipart upload failed to initialise: %w", err)
}
uid := cout.UploadId
@@ -3356,7 +3356,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
finished = true
} else if err != nil {
free()
- return errors.Wrap(err, "multipart upload failed to read source")
+ return fmt.Errorf("multipart upload failed to read source: %w", err)
}
buf = buf[:n]
@@ -3403,7 +3403,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return false, nil
})
if err != nil {
- return errors.Wrap(err, "multipart upload failed to upload part")
+ return fmt.Errorf("multipart upload failed to upload part: %w", err)
}
return nil
})
@@ -3431,7 +3431,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
- return errors.Wrap(err, "multipart upload failed to finalise")
+ return fmt.Errorf("multipart upload failed to finalise: %w", err)
}
return nil
}
@@ -3557,7 +3557,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// PutObject so we'll use this work-around.
url, headers, err := putObj.PresignRequest(15 * time.Minute)
if err != nil {
- return errors.Wrap(err, "s3 upload: sign request")
+ return fmt.Errorf("s3 upload: sign request: %w", err)
}
if o.fs.opt.V2Auth && headers == nil {
@@ -3572,7 +3572,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// create the vanilla http request
httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
if err != nil {
- return errors.Wrap(err, "s3 upload: new request")
+ return fmt.Errorf("s3 upload: new request: %w", err)
}
// set the headers we signed and the length
@@ -3592,7 +3592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
}
- err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
+ err = fmt.Errorf("s3 upload: %s: %s", resp.Status, body)
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
})
if err != nil {
diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go
index e6100ab00..76f9843ab 100644
--- a/backend/seafile/seafile.go
+++ b/backend/seafile/seafile.go
@@ -2,6 +2,7 @@ package seafile
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"time"
"github.com/coreos/go-semver/semver"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/seafile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -171,14 +171,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var err error
opt.Password, err = obscure.Reveal(opt.Password)
if err != nil {
- return nil, errors.Wrap(err, "couldn't decrypt user password")
+ return nil, fmt.Errorf("couldn't decrypt user password: %w", err)
}
}
if opt.LibraryKey != "" {
var err error
opt.LibraryKey, err = obscure.Reveal(opt.LibraryKey)
if err != nil {
- return nil, errors.Wrap(err, "couldn't decrypt library password")
+ return nil, fmt.Errorf("couldn't decrypt library password: %w", err)
}
}
@@ -282,7 +282,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(ctx, remote)
if err != nil {
- if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
+ if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the original f
f.rootDirectory = rootDirectory
return f, nil
@@ -305,7 +305,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
u, err := url.Parse(serverURL)
if err != nil {
- return nil, errors.Errorf("invalid server URL %s", serverURL)
+ return nil, fmt.Errorf("invalid server URL %s", serverURL)
}
is2faEnabled, _ := m.Get(config2FA)
@@ -886,7 +886,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// 1- rename source
err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName)
if err != nil {
- return errors.Wrap(err, "Cannot rename source directory to a temporary name")
+ return fmt.Errorf("Cannot rename source directory to a temporary name: %w", err)
}
// 2- move source to destination
@@ -900,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// 3- rename destination back to source name
err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName)
if err != nil {
- return errors.Wrap(err, "Cannot rename temporary directory to destination name")
+ return fmt.Errorf("Cannot rename temporary directory to destination name: %w", err)
}
return nil
diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go
index 563cd1896..29d2f8645 100644
--- a/backend/seafile/webapi.go
+++ b/backend/seafile/webapi.go
@@ -3,6 +3,7 @@ package seafile
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -11,7 +12,6 @@ import (
"path"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/seafile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
@@ -61,7 +61,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
_, err := srv.CallJSON(ctx, &opts, &request, &result)
if err != nil {
// This is only going to be http errors here
- return "", errors.Wrap(err, "failed to authenticate")
+ return "", fmt.Errorf("failed to authenticate: %w", err)
}
if result.Errors != nil && len(result.Errors) > 0 {
return "", errors.New(strings.Join(result.Errors, ", "))
@@ -94,7 +94,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get server info")
+ return nil, fmt.Errorf("failed to get server info: %w", err)
}
return &result, nil
}
@@ -120,7 +120,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get account info")
+ return nil, fmt.Errorf("failed to get account info: %w", err)
}
return &result, nil
}
@@ -147,7 +147,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get libraries")
+ return nil, fmt.Errorf("failed to get libraries: %w", err)
}
return result, nil
}
@@ -178,7 +178,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to create library")
+ return nil, fmt.Errorf("failed to create library: %w", err)
}
return result, nil
}
@@ -205,7 +205,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
return fs.ErrorPermissionDenied
}
}
- return errors.Wrap(err, "failed to delete library")
+ return fmt.Errorf("failed to delete library: %w", err)
}
return nil
}
@@ -240,7 +240,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
return nil
}
}
- return errors.Wrap(err, "failed to decrypt library")
+ return fmt.Errorf("failed to decrypt library: %w", err)
}
return nil
}
@@ -286,7 +286,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get directory contents")
+ return nil, fmt.Errorf("failed to get directory contents: %w", err)
}
// Clean up encoded names
@@ -327,7 +327,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
return nil, fs.ErrorDirNotFound
}
}
- return nil, errors.Wrap(err, "failed to get directory details")
+ return nil, fmt.Errorf("failed to get directory details: %w", err)
}
result.Name = f.opt.Enc.ToStandardName(result.Name)
result.Path = f.opt.Enc.ToStandardPath(result.Path)
@@ -366,7 +366,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
return fs.ErrorPermissionDenied
}
}
- return errors.Wrap(err, "failed to create directory")
+ return fmt.Errorf("failed to create directory: %w", err)
}
return nil
}
@@ -406,7 +406,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
return fs.ErrorPermissionDenied
}
}
- return errors.Wrap(err, "failed to rename directory")
+ return fmt.Errorf("failed to rename directory: %w", err)
}
return nil
}
@@ -449,7 +449,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
return fs.ErrorObjectNotFound
}
}
- return errors.Wrap(err, fmt.Sprintf("failed to move directory '%s' from '%s' to '%s'", srcName, srcDir, dstPath))
+ return fmt.Errorf("failed to move directory '%s' from '%s' to '%s': %w", srcName, srcDir, dstPath, err)
}
return nil
@@ -482,7 +482,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
return fs.ErrorPermissionDenied
}
}
- return errors.Wrap(err, "failed to delete directory")
+ return fmt.Errorf("failed to delete directory: %w", err)
}
return nil
}
@@ -516,7 +516,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get file details")
+ return nil, fmt.Errorf("failed to get file details: %w", err)
}
result.Name = f.opt.Enc.ToStandardName(result.Name)
result.Parent = f.opt.Enc.ToStandardPath(result.Parent)
@@ -542,7 +542,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to delete file")
+ return fmt.Errorf("failed to delete file: %w", err)
}
return nil
}
@@ -573,7 +573,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
return "", fs.ErrorObjectNotFound
}
}
- return "", errors.Wrap(err, "failed to get download link")
+ return "", fmt.Errorf("failed to get download link: %w", err)
}
return result, nil
}
@@ -667,7 +667,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
return "", fs.ErrorPermissionDenied
}
}
- return "", errors.Wrap(err, "failed to get upload link")
+ return "", fmt.Errorf("failed to get upload link: %w", err)
}
return result, nil
}
@@ -684,7 +684,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
- return nil, errors.Wrap(err, "failed to make multipart upload")
+ return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}
opts := rest.Opts{
@@ -711,7 +711,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
return nil, ErrorInternalDuringUpload
}
}
- return nil, errors.Wrap(err, "failed to upload file")
+ return nil, fmt.Errorf("failed to upload file: %w", err)
}
if len(result) > 0 {
result[0].Parent = f.opt.Enc.ToStandardPath(result[0].Parent)
@@ -750,7 +750,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
return nil, fs.ErrorObjectNotFound
}
}
- return nil, errors.Wrap(err, "failed to list shared links")
+ return nil, fmt.Errorf("failed to list shared links: %w", err)
}
return result, nil
}
@@ -788,7 +788,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
return nil, fs.ErrorObjectNotFound
}
}
- return nil, errors.Wrap(err, "failed to create a shared link")
+ return nil, fmt.Errorf("failed to create a shared link: %w", err)
}
return result, nil
}
@@ -830,7 +830,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
return nil, fs.ErrorObjectNotFound
}
}
- return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+ return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
return f.decodeFileInfo(result), nil
}
@@ -872,7 +872,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
return nil, fs.ErrorObjectNotFound
}
}
- return nil, errors.Wrap(err, fmt.Sprintf("failed to move file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+ return nil, fmt.Errorf("failed to move file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
return f.decodeFileInfo(result), nil
}
@@ -912,7 +912,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
return nil, fs.ErrorObjectNotFound
}
}
- return nil, errors.Wrap(err, fmt.Sprintf("failed to rename file '%s' to '%s'", filePath, newname))
+ return nil, fmt.Errorf("failed to rename file '%s' to '%s': %w", filePath, newname, err)
}
return f.decodeFileInfo(result), nil
}
@@ -949,7 +949,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
return fs.ErrorObjectNotFound
}
}
- return errors.Wrap(err, "failed empty the library trash")
+ return fmt.Errorf("failed empty the library trash: %w", err)
}
return nil
}
@@ -991,7 +991,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, "failed to get directory contents")
+ return nil, fmt.Errorf("failed to get directory contents: %w", err)
}
// Clean up encoded names
@@ -1038,7 +1038,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
return nil, fs.ErrorPermissionDenied
}
}
- return nil, errors.Wrap(err, fmt.Sprintf("failed to copy file %s:'%s' to %s:'%s'", srcLibraryID, srcPath, dstLibraryID, dstPath))
+ return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
err = rest.DecodeJSON(resp, &result)
if err != nil {
@@ -1090,7 +1090,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
return fs.ErrorObjectNotFound
}
}
- return errors.Wrap(err, "failed to rename file")
+ return fmt.Errorf("failed to rename file: %w", err)
}
return nil
}
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index 1564ff90c..86a452315 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -8,6 +8,7 @@ package sftp
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -20,7 +21,6 @@ import (
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -384,12 +384,12 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
}
c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
- return nil, errors.Wrap(err, "couldn't connect SSH")
+ return nil, fmt.Errorf("couldn't connect SSH: %w", err)
}
c.sftpClient, err = f.newSftpClient(c.sshClient)
if err != nil {
_ = c.sshClient.Close()
- return nil, errors.Wrap(err, "couldn't initialise SFTP")
+ return nil, fmt.Errorf("couldn't initialise SFTP: %w", err)
}
go c.wait()
return c, nil
@@ -468,16 +468,16 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
*pc = nil
if err != nil {
// work out if this is an expected error
- underlyingErr := errors.Cause(err)
isRegularError := false
- switch underlyingErr {
- case os.ErrNotExist:
+ var statusErr *sftp.StatusError
+ var pathErr *os.PathError
+ switch {
+ case errors.Is(err, os.ErrNotExist):
+ isRegularError = true
+ case errors.As(err, &statusErr):
+ isRegularError = true
+ case errors.As(err, &pathErr):
isRegularError = true
- default:
- switch underlyingErr.(type) {
- case *sftp.StatusError, *os.PathError:
- isRegularError = true
- }
}
// If not a regular SFTP error code then check the connection
if !isRegularError {
@@ -561,7 +561,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
if err != nil {
- return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
+ return nil, fmt.Errorf("couldn't parse known_hosts_file: %w", err)
}
sshConfig.HostKeyCallback = hostcallback
}
@@ -579,20 +579,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
- return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
+ return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
}
signers, err := sshAgentClient.Signers()
if err != nil {
- return nil, errors.Wrap(err, "couldn't read ssh agent signers")
+ return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
- return nil, errors.Wrap(err, "failed to read public key file")
+ return nil, fmt.Errorf("failed to read public key file: %w", err)
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse public key file")
+ return nil, fmt.Errorf("failed to parse public key file: %w", err)
}
pubM := pub.Marshal()
found := false
@@ -617,13 +617,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.KeyPem == "" {
key, err = ioutil.ReadFile(keyFile)
if err != nil {
- return nil, errors.Wrap(err, "failed to read private key file")
+ return nil, fmt.Errorf("failed to read private key file: %w", err)
}
} else {
// wrap in quotes because the config is a coming as a literal without them.
opt.KeyPem, err = strconv.Unquote("\"" + opt.KeyPem + "\"")
if err != nil {
- return nil, errors.Wrap(err, "pem key not formatted properly")
+ return nil, fmt.Errorf("pem key not formatted properly: %w", err)
}
key = []byte(opt.KeyPem)
}
@@ -641,19 +641,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(clearpass))
}
if err != nil {
- return nil, errors.Wrap(err, "failed to parse private key file")
+ return nil, fmt.Errorf("failed to parse private key file: %w", err)
}
// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
- return nil, errors.Wrap(err, "unable to read cert file")
+ return nil, fmt.Errorf("unable to read cert file: %w", err)
}
pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
if err != nil {
- return nil, errors.Wrap(err, "unable to parse cert file")
+ return nil, fmt.Errorf("unable to parse cert file: %w", err)
}
// And the signer for this, which includes the private key signer
@@ -669,7 +669,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
- return nil, errors.Wrap(err, "error generating cert signer")
+ return nil, fmt.Errorf("error generating cert signer: %w", err)
}
sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(pubsigner))
} else {
@@ -759,7 +759,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
// Make a connection and pool it to return errors early
c, err := f.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "NewFs")
+ return nil, fmt.Errorf("NewFs: %w", err)
}
cwd, err := c.sftpClient.Getwd()
f.putSftpConnection(&c, nil)
@@ -840,7 +840,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
}
c, err := f.getSftpConnection(ctx)
if err != nil {
- return false, errors.Wrap(err, "dirExists")
+ return false, fmt.Errorf("dirExists: %w", err)
}
info, err := c.sftpClient.Stat(dir)
f.putSftpConnection(&c, err)
@@ -848,7 +848,7 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
- return false, errors.Wrap(err, "dirExists stat failed")
+ return false, fmt.Errorf("dirExists stat failed: %w", err)
}
if !info.IsDir() {
return false, fs.ErrorIsFile
@@ -869,7 +869,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(ctx, root)
if err != nil {
- return nil, errors.Wrap(err, "List failed")
+ return nil, fmt.Errorf("List failed: %w", err)
}
if !ok {
return nil, fs.ErrorDirNotFound
@@ -880,12 +880,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
c, err := f.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "List")
+ return nil, fmt.Errorf("List: %w", err)
}
infos, err := c.sftpClient.ReadDir(sftpDir)
f.putSftpConnection(&c, err)
if err != nil {
- return nil, errors.Wrapf(err, "error listing %q", dir)
+ return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
for _, info := range infos {
remote := path.Join(dir, info.Name())
@@ -924,7 +924,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
err := f.mkParentDir(ctx, src.Remote())
if err != nil {
- return nil, errors.Wrap(err, "Put mkParentDir failed")
+ return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
}
// Temporary object under construction
o := &Object{
@@ -959,7 +959,7 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
}
ok, err := f.dirExists(ctx, dirPath)
if err != nil {
- return errors.Wrap(err, "mkdir dirExists failed")
+ return fmt.Errorf("mkdir dirExists failed: %w", err)
}
if ok {
return nil
@@ -971,12 +971,12 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
}
c, err := f.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "mkdir")
+ return fmt.Errorf("mkdir: %w", err)
}
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
- return errors.Wrapf(err, "mkdir %q failed", dirPath)
+ return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
}
return nil
}
@@ -993,7 +993,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// delete recursively with RemoveDirectory
entries, err := f.List(ctx, dir)
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
@@ -1002,7 +1002,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.absRoot, dir)
c, err := f.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "Rmdir")
+ return fmt.Errorf("Rmdir: %w", err)
}
err = c.sftpClient.RemoveDirectory(root)
f.putSftpConnection(&c, err)
@@ -1018,11 +1018,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err := f.mkParentDir(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "Move mkParentDir failed")
+ return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
}
c, err := f.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Move")
+ return nil, fmt.Errorf("Move: %w", err)
}
err = c.sftpClient.Rename(
srcObj.path(),
@@ -1030,11 +1030,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
)
f.putSftpConnection(&c, err)
if err != nil {
- return nil, errors.Wrap(err, "Move Rename failed")
+ return nil, fmt.Errorf("Move Rename failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "Move NewObject failed")
+ return nil, fmt.Errorf("Move NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -1059,7 +1059,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Check if destination exists
ok, err := f.dirExists(ctx, dstPath)
if err != nil {
- return errors.Wrap(err, "DirMove dirExists dst failed")
+ return fmt.Errorf("DirMove dirExists dst failed: %w", err)
}
if ok {
return fs.ErrorDirExists
@@ -1068,13 +1068,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath))
if err != nil {
- return errors.Wrap(err, "DirMove mkParentDir dst failed")
+ return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
// Do the move
c, err := f.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "DirMove")
+ return fmt.Errorf("DirMove: %w", err)
}
err = c.sftpClient.Rename(
srcPath,
@@ -1082,7 +1082,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
)
f.putSftpConnection(&c, err)
if err != nil {
- return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
+ return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
}
return nil
}
@@ -1094,13 +1094,13 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "run: get SFTP connection")
+ return nil, fmt.Errorf("run: get SFTP connection: %w", err)
}
defer f.putSftpConnection(&c, err)
session, err := c.sshClient.NewSession()
if err != nil {
- return nil, errors.Wrap(err, "run: get SFTP session")
+ return nil, fmt.Errorf("run: get SFTP session: %w", err)
}
defer func() {
_ = session.Close()
@@ -1112,7 +1112,7 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
err = session.Run(cmd)
if err != nil {
- return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
+ return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
}
return stdout.Bytes(), nil
@@ -1186,7 +1186,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil {
- return nil, errors.Wrap(err, "your remote may not support About")
+ return nil, fmt.Errorf("your remote may not support About: %w", err)
}
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
@@ -1257,12 +1257,12 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
- return "", errors.Wrap(err, "Hash get SFTP connection")
+ return "", fmt.Errorf("Hash get SFTP connection: %w", err)
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
- return "", errors.Wrap(err, "Hash put SFTP connection")
+ return "", fmt.Errorf("Hash put SFTP connection: %w", err)
}
var stdout, stderr bytes.Buffer
@@ -1366,7 +1366,7 @@ func (o *Object) setMetadata(info os.FileInfo) {
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
c, err := f.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "stat")
+ return nil, fmt.Errorf("stat: %w", err)
}
absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
@@ -1381,7 +1381,7 @@ func (o *Object) stat(ctx context.Context) error {
if os.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
- return errors.Wrap(err, "stat failed")
+ return fmt.Errorf("stat failed: %w", err)
}
if info.IsDir() {
return fs.ErrorIsDir
@@ -1399,16 +1399,16 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "SetModTime")
+ return fmt.Errorf("SetModTime: %w", err)
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
- return errors.Wrap(err, "SetModTime failed")
+ return fmt.Errorf("SetModTime failed: %w", err)
}
err = o.stat(ctx)
if err != nil {
- return errors.Wrap(err, "SetModTime stat failed")
+ return fmt.Errorf("SetModTime stat failed: %w", err)
}
return nil
}
@@ -1487,17 +1487,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Open")
+ return nil, fmt.Errorf("Open: %w", err)
}
sftpFile, err := c.sftpClient.Open(o.path())
o.fs.putSftpConnection(&c, err)
if err != nil {
- return nil, errors.Wrap(err, "Open failed")
+ return nil, fmt.Errorf("Open failed: %w", err)
}
if offset > 0 {
off, err := sftpFile.Seek(offset, io.SeekStart)
if err != nil || off != offset {
- return nil, errors.Wrap(err, "Open Seek failed")
+ return nil, fmt.Errorf("Open Seek failed: %w", err)
}
}
in = readers.NewLimitedReadCloser(o.fs.newObjectReader(sftpFile), limit)
@@ -1526,12 +1526,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.sha1sum = nil
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "Update")
+ return fmt.Errorf("Update: %w", err)
}
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
o.fs.putSftpConnection(&c, err)
if err != nil {
- return errors.Wrap(err, "Update Create failed")
+ return fmt.Errorf("Update Create failed: %w", err)
}
// remove the file if upload failed
remove := func() {
@@ -1551,18 +1551,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
if err != nil {
remove()
- return errors.Wrap(err, "Update ReadFrom failed")
+ return fmt.Errorf("Update ReadFrom failed: %w", err)
}
err = file.Close()
if err != nil {
remove()
- return errors.Wrap(err, "Update Close failed")
+ return fmt.Errorf("Update Close failed: %w", err)
}
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
- return errors.Wrap(err, "Update SetModTime failed")
+ return fmt.Errorf("Update SetModTime failed: %w", err)
}
// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
@@ -1576,7 +1576,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.size = src.Size()
o.mode = os.FileMode(0666) // regular file
} else if err != nil {
- return errors.Wrap(err, "Update stat failed")
+ return fmt.Errorf("Update stat failed: %w", err)
}
}
@@ -1587,7 +1587,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
- return errors.Wrap(err, "Remove")
+ return fmt.Errorf("Remove: %w", err)
}
err = c.sftpClient.Remove(o.path())
o.fs.putSftpConnection(&c, err)
diff --git a/backend/sharefile/api/types.go b/backend/sharefile/api/types.go
index 655681351..7b48bbec7 100644
--- a/backend/sharefile/api/types.go
+++ b/backend/sharefile/api/types.go
@@ -2,10 +2,9 @@
package api
import (
+ "errors"
"fmt"
"time"
-
- "github.com/pkg/errors"
)
// ListRequestSelect should be used in $select for Items/Children
@@ -122,7 +121,7 @@ type UploadFinishResponse struct {
// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
if finish.Error {
- return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
+ return "", fmt.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
}
if len(finish.Value) == 0 {
return "", errors.New("upload failed: no results returned")
diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go
index 62378d9f3..69da61cb9 100644
--- a/backend/sharefile/sharefile.go
+++ b/backend/sharefile/sharefile.go
@@ -74,6 +74,7 @@ Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|']
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -83,7 +84,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -144,7 +144,7 @@ func init() {
subdomain := auth.Form.Get("subdomain")
apicp := auth.Form.Get("apicp")
if subdomain == "" || apicp == "" {
- return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
+ return fmt.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
}
endpoint := "https://" + subdomain + "." + apicp
m.Set("endpoint", endpoint)
@@ -334,7 +334,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
}
return nil, fs.ErrorDirNotFound
}
- return nil, errors.Wrap(err, "couldn't find item")
+ return nil, fmt.Errorf("couldn't find item: %w", err)
}
if directoriesOnly && item.Type != api.ItemTypeFolder {
return nil, fs.ErrorIsFile
@@ -386,10 +386,10 @@ func errorHandler(resp *http.Response) error {
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
- return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
- return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
+ return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -444,7 +444,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var ts *oauthutil.TokenSource
client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure sharefile")
+ return nil, fmt.Errorf("failed to configure sharefile: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -477,23 +477,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
const serverTimezone = "America/New_York"
timezone, err := tzdata.Open(serverTimezone)
if err != nil {
- return nil, errors.Wrap(err, "failed to open timezone db")
+ return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
tzdata, err := ioutil.ReadAll(timezone)
if err != nil {
- return nil, errors.Wrap(err, "failed to read timezone")
+ return nil, fmt.Errorf("failed to read timezone: %w", err)
}
_ = timezone.Close()
f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata)
if err != nil {
- return nil, errors.Wrap(err, "failed to load location from timezone")
+ return nil, fmt.Errorf("failed to load location from timezone: %w", err)
}
// Find ID of user's root folder
if opt.RootFolderID == "" {
item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false)
if err != nil {
- return nil, errors.Wrap(err, "couldn't find root ID")
+ return nil, fmt.Errorf("couldn't find root ID: %w", err)
}
f.rootID = item.ID
} else {
@@ -639,7 +639,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return "", errors.Wrap(err, "CreateDir")
+ return "", fmt.Errorf("CreateDir: %w", err)
}
return info.ID, nil
}
@@ -671,7 +671,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
for i := range result.Value {
item := &result.Value[i]
@@ -825,7 +825,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return true
})
if err != nil {
- return errors.Wrap(err, "purgeCheck")
+ return fmt.Errorf("purgeCheck: %w", err)
}
if found {
return fs.ErrorDirectoryNotEmpty
@@ -900,7 +900,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
// Parse it back into a time
newModTime, err := time.Parse(time.RFC3339Nano, isoTime)
if err != nil {
- return nil, errors.Wrap(err, "updateItem: time parse")
+ return nil, fmt.Errorf("updateItem: time parse: %w", err)
}
modTime = &newModTime
}
@@ -934,7 +934,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
// To demonstrate bug
// item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil)
// if err != nil {
- // return nil, errors.Wrap(err, "Move rename leaf")
+ // return nil, fmt.Errorf("Move rename leaf: %w", err)
// }
// return item, nil
doRenameLeaf := oldLeaf != newLeaf
@@ -947,7 +947,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
tmpLeaf := newLeaf + "." + random.String(8)
item, err = f.updateItem(ctx, id, tmpLeaf, "", nil)
if err != nil {
- return nil, errors.Wrap(err, "Move rename leaf")
+ return nil, fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -956,7 +956,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doMove {
item, err = f.updateItem(ctx, id, "", newDirectoryID, nil)
if err != nil {
- return nil, errors.Wrap(err, "Move directory")
+ return nil, fmt.Errorf("Move directory: %w", err)
}
}
@@ -964,7 +964,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
if doRenameLeaf {
item, err = f.updateItem(ctx, id, newLeaf, "", nil)
if err != nil {
- return nil, errors.Wrap(err, "Move rename leaf")
+ return nil, fmt.Errorf("Move rename leaf: %w", err)
}
}
@@ -1079,7 +1079,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
if sameName && srcParentID == dstParentID {
- return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
+ return nil, fmt.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
}
// Discover whether we can just copy directly or not
@@ -1095,7 +1095,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
directCopy = true
} else if err != nil {
- return nil, errors.Wrap(err, "copy: failed to examine destination dir")
+ return nil, fmt.Errorf("copy: failed to examine destination dir: %w", err)
} else {
// otherwise need to copy via a temporary directory
}
@@ -1109,17 +1109,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
tmpDir := "rclone-temp-dir-" + random.String(16)
err = f.Mkdir(ctx, tmpDir)
if err != nil {
- return nil, errors.Wrap(err, "copy: failed to make temp dir")
+ return nil, fmt.Errorf("copy: failed to make temp dir: %w", err)
}
defer func() {
rmdirErr := f.Rmdir(ctx, tmpDir)
if rmdirErr != nil && err == nil {
- err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
+ err = fmt.Errorf("copy: failed to remove temp dir: %w", rmdirErr)
}
}()
tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
if err != nil {
- return nil, errors.Wrap(err, "copy: failed to find temp dir")
+ return nil, fmt.Errorf("copy: failed to find temp dir: %w", err)
}
copyTargetDirID = tmpDirID
}
@@ -1221,7 +1221,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.Type != api.ItemTypeFile {
- return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+ return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile)
}
o.hasMetaData = true
o.size = info.Size
@@ -1302,7 +1302,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "open: fetch download specification")
+ return nil, fmt.Errorf("open: fetch download specification: %w", err)
}
fs.FixRangeOption(options, o.size)
@@ -1317,7 +1317,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "open")
+ return nil, fmt.Errorf("open: %w", err)
}
return resp.Body, err
}
@@ -1373,7 +1373,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "upload get specification")
+ return fmt.Errorf("upload get specification: %w", err)
}
// If file is large then upload in parts
@@ -1398,7 +1398,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "upload file")
+ return fmt.Errorf("upload file: %w", err)
}
return o.checkUploadResponse(ctx, &finish)
}
@@ -1434,7 +1434,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "remove")
+ return fmt.Errorf("remove: %w", err)
}
return nil
}
@@ -1443,7 +1443,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
func (o *Object) Remove(ctx context.Context) error {
err := o.readMetaData(ctx)
if err != nil {
- return errors.Wrap(err, "Remove: Failed to read metadata")
+ return fmt.Errorf("Remove: Failed to read metadata: %w", err)
}
return o.fs.remove(ctx, o.id)
}
diff --git a/backend/sharefile/upload.go b/backend/sharefile/upload.go
index f6ee40c86..eb8358b65 100644
--- a/backend/sharefile/upload.go
+++ b/backend/sharefile/upload.go
@@ -15,7 +15,6 @@ import (
"strings"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -55,7 +54,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
case "threaded":
streamed = false
default:
- return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
+ return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
}
threads := f.ci.Transfers
@@ -87,7 +86,7 @@ func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
err = json.Unmarshal(respBody, &finish)
if err != nil {
// Sometimes the unmarshal fails in which case return the body
- return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
+ return fmt.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
}
return up.o.checkUploadResponse(up.ctx, &finish)
}
@@ -240,7 +239,7 @@ outer:
// check size read is correct
if eof && err == nil && up.size >= 0 && up.size != offset {
- err = errors.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
+ err = fmt.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
}
// read any errors
diff --git a/backend/sia/sia.go b/backend/sia/sia.go
index 749445ebf..2017332ce 100644
--- a/backend/sia/sia.go
+++ b/backend/sia/sia.go
@@ -3,6 +3,7 @@ package sia
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -11,18 +12,16 @@ import (
"strings"
"time"
- "github.com/rclone/rclone/fs/config"
- "github.com/rclone/rclone/lib/encoder"
-
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/sia/api"
"github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
+ "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -460,7 +459,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.APIPassword != "" {
opt.APIPassword, err = obscure.Reveal(opt.APIPassword)
if err != nil {
- return nil, errors.Wrap(err, "couldn't decrypt API password")
+ return nil, fmt.Errorf("couldn't decrypt API password: %w", err)
}
f.srv.SetUserPass("", opt.APIPassword)
}
@@ -474,7 +473,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(ctx, remote)
if err != nil {
- if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
+ if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -493,7 +492,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
- return errors.Wrap(err, "error when trying to read error body")
+ return fmt.Errorf("error when trying to read error body: %w", err)
}
// Decode error response
errResponse := new(api.Error)
diff --git a/backend/sugarsync/sugarsync.go b/backend/sugarsync/sugarsync.go
index 35801090b..4b57f4184 100644
--- a/backend/sugarsync/sugarsync.go
+++ b/backend/sugarsync/sugarsync.go
@@ -14,6 +14,7 @@ To work around this we use the remote "TestSugarSync:Test" to test with.
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
@@ -25,7 +26,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/sugarsync/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -79,7 +79,7 @@ func init() {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
- return nil, errors.Wrap(err, "failed to read options")
+ return nil, fmt.Errorf("failed to read options: %w", err)
}
switch config.State {
@@ -124,7 +124,7 @@ func init() {
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
- return nil, errors.Wrap(err, "failed to get token")
+ return nil, fmt.Errorf("failed to get token: %w", err)
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
@@ -309,7 +309,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
if resp != nil && resp.StatusCode == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
- return nil, errors.Wrap(err, "failed to get authorization")
+ return nil, fmt.Errorf("failed to get authorization: %w", err)
}
return info, nil
}
@@ -343,7 +343,7 @@ func (f *Fs) getAuthToken(ctx context.Context) error {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to get authorization")
+ return fmt.Errorf("failed to get authorization: %w", err)
}
f.opt.Authorization = resp.Header.Get("Location")
f.authExpiry = authResponse.Expiration
@@ -391,7 +391,7 @@ func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "failed to get user")
+ return nil, fmt.Errorf("failed to get user: %w", err)
}
return user, nil
}
@@ -445,7 +445,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if strings.HasSuffix(f.opt.RootID, "/contents") {
f.opt.RootID = f.opt.RootID[:len(f.opt.RootID)-9]
} else {
- return nil, errors.Errorf("unexpected rootID %q", f.opt.RootID)
+ return nil, fmt.Errorf("unexpected rootID %q", f.opt.RootID)
}
// Cache the results
f.m.Set("root_id", f.opt.RootID)
@@ -497,13 +497,13 @@ var findError = regexp.MustCompile(`
(.*?)
`)
func errorHandler(resp *http.Response) (err error) {
body, err := rest.ReadBody(resp)
if err != nil {
- return errors.Wrap(err, "error reading error out of body")
+ return fmt.Errorf("error reading error out of body: %w", err)
}
match := findError.FindSubmatch(body)
if match == nil || len(match) < 2 || len(match[1]) == 0 {
- return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+ return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
- return errors.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
+ return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
@@ -596,7 +596,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return "", err
}
if !found {
- return "", errors.Errorf("couldn't find ID for newly created directory %q", leaf)
+ return "", fmt.Errorf("couldn't find ID for newly created directory %q", leaf)
}
}
@@ -636,7 +636,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
if fileFn != nil {
for i := range result.Files {
@@ -873,7 +873,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
- return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
+ return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -1247,7 +1247,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.id == "" {
o.id, err = o.fs.createFile(ctx, directoryID, leaf, fs.MimeType(ctx, src))
if err != nil {
- return errors.Wrap(err, "failed to create file")
+ return fmt.Errorf("failed to create file: %w", err)
}
if o.id == "" {
return errors.New("failed to create file: no ID")
@@ -1280,7 +1280,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "failed to upload file")
+ return fmt.Errorf("failed to upload file: %w", err)
}
o.hasMetaData = false
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index 0dcead728..03b496301 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -5,6 +5,7 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
"fmt"
"io"
"net/url"
@@ -15,7 +16,6 @@ import (
"github.com/google/uuid"
"github.com/ncw/swift/v2"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -381,7 +381,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
if opt.EnvAuth {
err := c.ApplyEnvironment()
if err != nil {
- return nil, errors.Wrap(err, "failed to read environment variables")
+ return nil, fmt.Errorf("failed to read environment variables: %w", err)
}
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
@@ -423,7 +423,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
- return errors.Errorf("%s is less than %s", cs, minChunkSize)
+ return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
@@ -499,7 +499,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
- return nil, errors.Wrap(err, "swift: chunk size")
+ return nil, fmt.Errorf("swift: chunk size: %w", err)
}
c, err := swiftConnection(ctx, opt, name)
@@ -670,7 +670,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "container listing failed")
+ return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
@@ -762,7 +762,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return shouldRetry(ctx, err)
})
if err != nil {
- return nil, errors.Wrap(err, "container listing failed")
+ return nil, fmt.Errorf("container listing failed: %w", err)
}
var total, objects int64
for _, c := range containers {
diff --git a/backend/tardigrade/fs.go b/backend/tardigrade/fs.go
index 0613ca2d6..afe9f55e5 100644
--- a/backend/tardigrade/fs.go
+++ b/backend/tardigrade/fs.go
@@ -6,13 +6,13 @@ package tardigrade
import (
"context"
+ "errors"
"fmt"
"io"
"path"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -64,12 +64,12 @@ func init() {
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
- return nil, errors.Wrap(err, "couldn't create access grant")
+ return nil, fmt.Errorf("couldn't create access grant: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
- return nil, errors.Wrap(err, "couldn't serialize access grant")
+ return nil, fmt.Errorf("couldn't serialize access grant: %w", err)
}
m.Set("satellite_address", satellite)
m.Set("access_grant", serializedAccess)
@@ -78,7 +78,7 @@ func init() {
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
- return nil, errors.Errorf("invalid provider type: %s", provider)
+ return nil, fmt.Errorf("invalid provider type: %s", provider)
}
return nil, nil
},
@@ -188,24 +188,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
- return nil, errors.Wrap(err, "tardigrade: access")
+ return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
- return nil, errors.Wrap(err, "tardigrade: access")
+ return nil, fmt.Errorf("tardigrade: access: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
- return nil, errors.Wrap(err, "tardigrade: access")
+ return nil, fmt.Errorf("tardigrade: access: %w", err)
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
- return nil, errors.Wrap(err, "tardigrade: access")
+ return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
@@ -237,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
- return f, errors.Wrap(err, "tardigrade: bucket")
+ return f, fmt.Errorf("tardigrade: bucket: %w", err)
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -274,7 +274,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
- return nil, errors.Wrap(err, "tardigrade: project")
+ return nil, fmt.Errorf("tardigrade: project: %w", err)
}
return
diff --git a/backend/tardigrade/object.go b/backend/tardigrade/object.go
index 581070d69..2d8372a7d 100644
--- a/backend/tardigrade/object.go
+++ b/backend/tardigrade/object.go
@@ -5,11 +5,11 @@ package tardigrade
import (
"context"
+ "errors"
"io"
"path"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
diff --git a/backend/union/entry.go b/backend/union/entry.go
index 31378e710..9142a61a0 100644
--- a/backend/union/entry.go
+++ b/backend/union/entry.go
@@ -2,11 +2,11 @@ package union
import (
"context"
+ "fmt"
"io"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
@@ -82,7 +82,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
- errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+ }
} else {
errs[i] = fs.ErrorNotAFile
}
@@ -101,7 +101,9 @@ func (o *Object) Remove(ctx context.Context) error {
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Remove(ctx)
- errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+ }
} else {
errs[i] = fs.ErrorNotAFile
}
@@ -120,7 +120,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.SetModTime(ctx, t)
- errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+ }
} else {
errs[i] = fs.ErrorNotAFile
}
diff --git a/backend/union/policy/policy.go b/backend/union/policy/policy.go
index 19cee2e01..d784c8e29 100644
--- a/backend/union/policy/policy.go
+++ b/backend/union/policy/policy.go
@@ -2,12 +2,12 @@ package policy
import (
"context"
+ "fmt"
"math/rand"
"path"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)
@@ -44,7 +44,7 @@ func registerPolicy(name string, p Policy) {
func Get(name string) (Policy, error) {
p, ok := policies[strings.ToLower(name)]
if !ok {
- return nil, errors.Errorf("didn't find policy called %q", name)
+ return nil, fmt.Errorf("didn't find policy called %q", name)
}
return p, nil
}
diff --git a/backend/union/union.go b/backend/union/union.go
index 7c3951771..c3fb2e26b 100644
--- a/backend/union/union.go
+++ b/backend/union/union.go
@@ -3,6 +3,7 @@ package union
import (
"bufio"
"context"
+ "errors"
"fmt"
"io"
"path"
@@ -11,7 +12,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/policy"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
@@ -99,7 +99,7 @@ func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
cd: entries,
}, nil
default:
- return nil, errors.Errorf("unknown object type %T", e)
+ return nil, fmt.Errorf("unknown object type %T", e)
}
}
@@ -132,7 +132,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Rmdir(ctx, dir)
- errs[i] = errors.Wrap(err, upstreams[i].Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+ }
})
return errs.Err()
}
@@ -162,7 +162,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Mkdir(ctx, dir)
- errs[i] = errors.Wrap(err, upstreams[i].Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+ }
})
return errs.Err()
}
@@ -186,10 +186,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Features().Purge(ctx, dir)
- if errors.Cause(err) == fs.ErrorDirNotFound {
+ if errors.Is(err, fs.ErrorDirNotFound) {
err = nil
}
- errs[i] = errors.Wrap(err, upstreams[i].Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
+ }
})
return errs.Err()
}
@@ -264,7 +264,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
su := entries[i].UpstreamFs()
o, ok := entries[i].(*upstream.Object)
if !ok {
- errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
+ errs[i] = fmt.Errorf("%s: %w", su.Name(), fs.ErrorNotAFile)
return
}
var du *upstream.Fs
@@ -274,7 +274,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
}
if du == nil {
- errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
+ errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), remote, fs.ErrorCantMove)
return
}
srcObj := o.UnWrap()
@@ -286,7 +286,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Do the Move or Copy
dstObj, err := do(ctx, srcObj, remote)
if err != nil || dstObj == nil {
- errs[i] = errors.Wrap(err, su.Name())
+ errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
return
}
objs[i] = du.WrapObject(dstObj)
@@ -294,7 +294,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if duFeatures.Move == nil {
err = srcObj.Remove(ctx)
if err != nil {
- errs[i] = errors.Wrap(err, su.Name())
+ errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
return
}
}
@@ -345,18 +345,20 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
}
if du == nil {
- errs[i] = errors.Wrap(fs.ErrorCantDirMove, su.Name()+":"+su.Root())
+ errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), su.Root(), fs.ErrorCantDirMove)
return
}
err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
- errs[i] = errors.Wrap(err, du.Name()+":"+du.Root())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", du.Name()+":"+du.Root(), err)
+ }
})
errs = errs.FilterNil()
if len(errs) == 0 {
return nil
}
for _, e := range errs {
- if errors.Cause(e) != fs.ErrorDirExists {
+ if !errors.Is(e, fs.ErrorDirExists) {
return errs
}
}
@@ -477,7 +477,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
o, err = u.Put(ctx, readers[i], src, options...)
}
if err != nil {
- errs[i] = errors.Wrap(err, u.Name())
+ errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
objs[i] = u.WrapObject(o)
@@ -537,7 +537,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
for _, u := range f.upstreams {
usg, err := u.About(ctx)
- if errors.Cause(err) == fs.ErrorDirNotFound {
+ if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
@@ -593,7 +593,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
u := f.upstreams[i]
entries, err := u.List(ctx, dir)
if err != nil {
- errs[i] = errors.Wrap(err, u.Name())
+ errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
uEntries := make([]upstream.Entry, len(entries))
@@ -604,7 +604,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
- if errors.Cause(e) == fs.ErrorDirNotFound {
+ if errors.Is(e, fs.ErrorDirNotFound) {
return nil
}
return e
@@ -657,13 +657,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
}
if err != nil {
- errs[i] = errors.Wrap(err, u.Name())
+ errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
- if errors.Cause(e) == fs.ErrorDirNotFound {
+ if errors.Is(e, fs.ErrorDirNotFound) {
return nil
}
return e
@@ -688,7 +688,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
u := f.upstreams[i]
o, err := u.NewObject(ctx, remote)
if err != nil && err != fs.ErrorObjectNotFound {
- errs[i] = errors.Wrap(err, u.Name())
+ errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
return
}
objs[i] = u.WrapObject(o)
@@ -777,7 +777,9 @@ func (f *Fs) Shutdown(ctx context.Context) error {
u := f.upstreams[i]
if do := u.Features().Shutdown; do != nil {
err := do(ctx)
- errs[i] = errors.Wrap(err, u.Name())
+ if err != nil {
+ errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
+ }
}
})
return errs.Err()
diff --git a/backend/union/upstream/upstream.go b/backend/union/upstream/upstream.go
index 93bce52e9..93b8ae521 100644
--- a/backend/union/upstream/upstream.go
+++ b/backend/union/upstream/upstream.go
@@ -2,6 +2,8 @@ package upstream
import (
"context"
+ "errors"
+ "fmt"
"io"
"math"
"path"
@@ -11,7 +13,6 @@ import (
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
@@ -133,7 +134,7 @@ func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
case fs.Directory:
return f.WrapDirectory(e.(fs.Directory)), nil
default:
- return nil, errors.Errorf("unknown object type %T", e)
+ return nil, fmt.Errorf("unknown object type %T", e)
}
}
@@ -335,7 +336,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
- if errors.Cause(err) == fs.ErrorDirNotFound {
+ if errors.Is(err, fs.ErrorDirNotFound) {
err = nil
}
return err
diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go
index 3f1448a30..94ef96fd9 100644
--- a/backend/uptobox/uptobox.go
+++ b/backend/uptobox/uptobox.go
@@ -3,6 +3,7 @@ package uptobox
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,7 +15,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/uptobox/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -408,7 +408,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, filename
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't upload file")
+ return nil, fmt.Errorf("couldn't upload file: %w", err)
}
return &ul, nil
}
@@ -438,10 +438,10 @@ func (f *Fs) move(ctx context.Context, dstPath string, fileID string) (err error
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't move file")
+ return fmt.Errorf("couldn't move file: %w", err)
}
if info.StatusCode != 0 {
- return errors.Errorf("move: api error: %d - %s", info.StatusCode, info.Message)
+ return fmt.Errorf("move: api error: %d - %s", info.StatusCode, info.Message)
}
return err
}
@@ -460,10 +460,10 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't update file info")
+ return fmt.Errorf("couldn't update file info: %w", err)
}
if info.StatusCode != 0 {
- return errors.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message)
+ return fmt.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message)
}
return err
}
@@ -493,7 +493,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
if info.StatusCode != 0 {
- return nil, errors.Errorf("putUnchecked: api error: %d - %s", info.StatusCode, info.Message)
+ return nil, fmt.Errorf("putUnchecked: api error: %d - %s", info.StatusCode, info.Message)
}
// we need to have a safe name for the upload to work
tmpName := "rcloneTemp" + random.String(8)
@@ -681,7 +681,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if needMove {
err := f.mkDirs(ctx, strings.Trim(dstBase, "/"))
if err != nil {
- return nil, errors.Wrap(err, "move: failed to make destination dirs")
+ return nil, fmt.Errorf("move: failed to make destination dirs: %w", err)
}
err = f.move(ctx, dstBase, srcObj.code)
@@ -694,7 +694,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if needRename {
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: srcObj.code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
if err != nil {
- return nil, errors.Wrap(err, "move: failed final rename")
+ return nil, fmt.Errorf("move: failed final rename: %w", err)
}
}
@@ -751,7 +751,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
srcPath := srcFs.dirPath(srcRemote)
srcInfo, err := f.readMetaDataForPath(ctx, srcPath, &api.MetadataRequestOptions{Limit: 1})
if err != nil {
- return errors.Wrap(err, "dirmove: source not found")
+ return fmt.Errorf("dirmove: source not found: %w", err)
}
// check if the destination allready exists
dstPath := f.dirPath(dstRemote)
@@ -764,13 +764,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstBase, dstName := f.splitPathFull(dstRemote)
err = f.mkDirs(ctx, strings.Trim(dstBase, "/"))
if err != nil {
- return errors.Wrap(err, "dirmove: failed to create dirs")
+ return fmt.Errorf("dirmove: failed to create dirs: %w", err)
}
// find the destination parent dir
dstInfo, err = f.readMetaDataForPath(ctx, dstBase, &api.MetadataRequestOptions{Limit: 1})
if err != nil {
- return errors.Wrap(err, "dirmove: failed to read destination")
+ return fmt.Errorf("dirmove: failed to read destination: %w", err)
}
srcBase, srcName := srcFs.splitPathFull(srcRemote)
@@ -784,7 +784,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
tmpName := "rcloneTemp" + random.String(8)
err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, tmpName)
if err != nil {
- return errors.Wrap(err, "dirmove: failed initial rename")
+ return fmt.Errorf("dirmove: failed initial rename: %w", err)
}
}
@@ -807,7 +807,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "dirmove: failed to move")
+ return fmt.Errorf("dirmove: failed to move: %w", err)
}
if apiErr.StatusCode != 0 {
return apiErr
@@ -818,7 +818,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if needRename {
err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, dstName)
if err != nil {
- return errors.Wrap(err, "dirmove: failed final rename")
+ return fmt.Errorf("dirmove: failed final rename: %w", err)
}
}
return nil
@@ -848,10 +848,10 @@ func (f *Fs) copy(ctx context.Context, dstPath string, fileID string) (err error
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "couldn't copy file")
+ return fmt.Errorf("couldn't copy file: %w", err)
}
if info.StatusCode != 0 {
- return errors.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message)
+ return fmt.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message)
}
return err
}
@@ -871,7 +871,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err := f.mkDirs(ctx, path.Join(f.root, dstBase))
if err != nil {
- return nil, errors.Wrap(err, "copy: failed to make destination dirs")
+ return nil, fmt.Errorf("copy: failed to make destination dirs: %w", err)
}
err = f.copy(ctx, f.dirPath(dstBase), srcObj.code)
@@ -881,13 +881,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
newObj, err := f.NewObject(ctx, path.Join(dstBase, srcLeaf))
if err != nil {
- return nil, errors.Wrap(err, "copy: couldn't find copied object")
+ return nil, fmt.Errorf("copy: couldn't find copied object: %w", err)
}
if needRename {
err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: newObj.(*Object).code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
if err != nil {
- return nil, errors.Wrap(err, "copy: failed final rename")
+ return nil, fmt.Errorf("copy: failed final rename: %w", err)
}
newObj.(*Object).remote = remote
}
@@ -970,7 +970,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "open: failed to get download link")
+ return nil, fmt.Errorf("open: failed to get download link: %w", err)
}
fs.FixRangeOption(options, o.size)
@@ -1010,7 +1010,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// delete duplicate object after successful upload
err = o.Remove(ctx)
if err != nil {
- return errors.Wrap(err, "failed to remove old version")
+ return fmt.Errorf("failed to remove old version: %w", err)
}
// Replace guts of old object with new one
@@ -1038,7 +1038,7 @@ func (o *Object) Remove(ctx context.Context) error {
return err
}
if info.StatusCode != 0 {
- return errors.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message)
+ return fmt.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message)
}
return nil
}
diff --git a/backend/webdav/odrvcookie/fetch.go b/backend/webdav/odrvcookie/fetch.go
index 5e7f84bf9..f6b25d29e 100644
--- a/backend/webdav/odrvcookie/fetch.go
+++ b/backend/webdav/odrvcookie/fetch.go
@@ -13,7 +13,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"golang.org/x/net/publicsuffix"
@@ -122,12 +121,12 @@ func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieResponse, error) {
spRoot, err := url.Parse(ca.endpoint)
if err != nil {
- return nil, errors.Wrap(err, "Error while constructing endpoint URL")
+ return nil, fmt.Errorf("error while constructing endpoint URL: %w", err)
}
u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
if err != nil {
- return nil, errors.Wrap(err, "Error while constructing login URL")
+ return nil, fmt.Errorf("error while constructing login URL: %w", err)
}
// To authenticate with davfs or anything else we need two cookies (rtFa and FedAuth)
@@ -143,7 +142,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
// Send the previously acquired Token as a Post parameter
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Body.Token)); err != nil {
- return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
+ return nil, fmt.Errorf("error while grabbing cookies from endpoint: %w", err)
}
cookieResponse := CookieResponse{}
@@ -171,7 +170,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
buf := &bytes.Buffer{}
if err := t.Execute(buf, reqData); err != nil {
- return nil, errors.Wrap(err, "Error while filling auth token template")
+ return nil, fmt.Errorf("error while filling auth token template: %w", err)
}
// Create and execute the first request which returns an auth token for the sharepoint service
@@ -184,7 +183,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
client := fshttp.NewClient(ctx)
resp, err := client.Do(req)
if err != nil {
- return nil, errors.Wrap(err, "Error while logging in to endpoint")
+ return nil, fmt.Errorf("error while logging in to endpoint: %w", err)
}
defer fs.CheckClose(resp.Body, &err)
@@ -209,7 +208,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
}
if err != nil {
- return nil, errors.Wrap(err, "Error while reading endpoint response")
+ return nil, fmt.Errorf("error while reading endpoint response: %w", err)
}
return
}
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 12f312530..9564ae601 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -12,6 +12,7 @@ import (
"context"
"crypto/tls"
"encoding/xml"
+ "errors"
"fmt"
"io"
"net/http"
@@ -23,7 +24,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/webdav/api"
"github.com/rclone/rclone/backend/webdav/odrvcookie"
"github.com/rclone/rclone/fs"
@@ -303,7 +303,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
}
}
if err != nil {
- return nil, errors.Wrap(err, "read metadata failed")
+ return nil, fmt.Errorf("read metadata failed: %w", err)
}
if len(result.Responses) < 1 {
return nil, fs.ErrorObjectNotFound
@@ -322,7 +322,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
- return errors.Wrap(err, "error when trying to read error from body")
+ return fmt.Errorf("error when trying to read error from body: %w", err)
}
// Decode error response
errResponse := new(api.Error)
@@ -387,7 +387,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
- return nil, errors.Wrap(err, "couldn't decrypt password")
+ return nil, fmt.Errorf("couldn't decrypt password: %w", err)
}
}
if opt.Vendor == "" {
@@ -465,7 +465,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
_, err := f.NewObject(ctx, remote)
if err != nil {
- if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorIsDir {
+ if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorIsDir) {
// File doesn't exist so return old f
f.root = root
return f, nil
@@ -503,7 +503,7 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
if stderrString == "" {
stderrString = stdoutString
}
- return "", errors.Wrapf(err, "failed to get bearer token using %q: %s", f.opt.BearerTokenCommand, stderrString)
+ return "", fmt.Errorf("failed to get bearer token using %q: %s: %w", f.opt.BearerTokenCommand, stderrString, err)
}
return stdoutString, nil
}
@@ -673,12 +673,12 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
return found, fs.ErrorDirNotFound
}
}
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
//fmt.Printf("result = %#v", &result)
baseURL, err := rest.URLJoin(f.endpoint, opts.Path)
if err != nil {
- return false, errors.Wrap(err, "couldn't join URL")
+ return false, fmt.Errorf("couldn't join URL: %w", err)
}
for i := range result.Responses {
item := &result.Responses[i]
@@ -947,7 +947,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
// FIXME parse Multistatus response
return nil
@@ -986,11 +986,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
- return nil, errors.Wrap(err, "Copy mkParentDir failed")
+ return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
}
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
if err != nil {
- return nil, errors.Wrap(err, "copyOrMove couldn't join URL")
+ return nil, fmt.Errorf("copyOrMove couldn't join URL: %w", err)
}
var resp *http.Response
opts := rest.Opts{
@@ -1010,11 +1010,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "Copy call failed")
+ return nil, fmt.Errorf("Copy call failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "Copy NewObject failed")
+ return nil, fmt.Errorf("Copy NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -1077,18 +1077,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
if err != fs.ErrorDirNotFound {
- return errors.Wrap(err, "DirMove dirExists dst failed")
+ return fmt.Errorf("DirMove dirExists dst failed: %w", err)
}
// Make sure the parent directory exists
err = f.mkParentDir(ctx, dstPath)
if err != nil {
- return errors.Wrap(err, "DirMove mkParentDir dst failed")
+ return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
}
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
if err != nil {
- return errors.Wrap(err, "DirMove couldn't join URL")
+ return fmt.Errorf("DirMove couldn't join URL: %w", err)
}
var resp *http.Response
@@ -1106,7 +1106,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "DirMove MOVE call failed")
+ return fmt.Errorf("DirMove MOVE call failed: %w", err)
}
return nil
}
@@ -1148,7 +1148,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "about call failed")
+ return nil, fmt.Errorf("about call failed: %w", err)
}
usage := &fs.Usage{}
if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {
@@ -1289,7 +1289,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
err = o.fs.mkParentDir(ctx, o.filePath())
if err != nil {
- return errors.Wrap(err, "Update mkParentDir failed")
+ return fmt.Errorf("Update mkParentDir failed: %w", err)
}
size := src.Size()
diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go
index d1fa3321b..abcc24c27 100644
--- a/backend/yandex/yandex.go
+++ b/backend/yandex/yandex.go
@@ -3,6 +3,7 @@ package yandex
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"log"
@@ -13,7 +14,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/yandex/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -249,7 +249,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
token, err := oauthutil.GetToken(name, m)
if err != nil {
- return nil, errors.Wrap(err, "couldn't read OAuth token")
+ return nil, fmt.Errorf("couldn't read OAuth token: %w", err)
}
if token.RefreshToken == "" {
return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -258,13 +258,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
token.TokenType = "OAuth"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
- return nil, errors.Wrap(err, "couldn't save OAuth token")
+ return nil, fmt.Errorf("couldn't save OAuth token: %w", err)
}
log.Printf("Automatically upgraded OAuth config.")
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure Yandex")
+ return nil, fmt.Errorf("failed to configure Yandex: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -309,7 +309,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.Reso
case "dir":
t, err := time.Parse(time.RFC3339Nano, object.Modified)
if err != nil {
- return nil, errors.Wrap(err, "error parsing time in directory item")
+ return nil, fmt.Errorf("error parsing time in directory item: %w", err)
}
d := fs.NewDir(remote, t).SetSize(object.Size)
return d, nil
@@ -560,19 +560,19 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
var status api.AsyncStatus
err = json.Unmarshal(body, &status)
if err != nil {
- return errors.Wrapf(err, "async status result not JSON: %q", body)
+ return fmt.Errorf("async status result not JSON: %q: %w", body, err)
}
switch status.Status {
case "failure":
- return errors.Errorf("async operation returned %q", status.Status)
+ return fmt.Errorf("async operation returned %q", status.Status)
case "success":
return nil
}
time.Sleep(1 * time.Second)
}
- return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
+ return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
@@ -607,7 +607,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
var info api.AsyncInfo
err = json.Unmarshal(body, &info)
if err != nil {
- return errors.Wrapf(err, "async info result not JSON: %q", body)
+ return fmt.Errorf("async info result not JSON: %q: %w", body, err)
}
return f.waitForJob(ctx, info.HRef)
}
@@ -623,7 +623,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
//send request to get list of objects in this directory.
info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
if len(info.Embedded.Items) != 0 {
return fs.ErrorDirectoryNotEmpty
@@ -683,7 +683,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
var info api.AsyncInfo
err = json.Unmarshal(body, &info)
if err != nil {
- return errors.Wrapf(err, "async info result not JSON: %q", body)
+ return fmt.Errorf("async info result not JSON: %q: %w", body, err)
}
return f.waitForJob(ctx, info.HRef)
}
@@ -714,7 +714,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
return f.NewObject(ctx, remote)
@@ -744,7 +744,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)
if err != nil {
- return nil, errors.Wrap(err, "couldn't move file")
+ return nil, fmt.Errorf("couldn't move file: %w", err)
}
return f.NewObject(ctx, remote)
@@ -795,7 +795,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)
if err != nil {
- return errors.Wrap(err, "couldn't move directory")
+ return fmt.Errorf("couldn't move directory: %w", err)
}
return nil
}
@@ -831,9 +831,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if err != nil {
if unlink {
- return "", errors.Wrap(err, "couldn't remove public link")
+ return "", fmt.Errorf("couldn't remove public link: %w", err)
}
- return "", errors.Wrap(err, "couldn't create public link")
+ return "", fmt.Errorf("couldn't create public link: %w", err)
}
info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
@@ -934,7 +934,7 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
}
t, err := time.Parse(time.RFC3339Nano, modTimeString)
if err != nil {
- return errors.Wrapf(err, "failed to parse modtime from %q", modTimeString)
+ return fmt.Errorf("failed to parse modtime from %q: %w", modTimeString, err)
}
o.modTime = t
return nil
diff --git a/backend/zoho/zoho.go b/backend/zoho/zoho.go
index 117ba74e3..2132037c6 100644
--- a/backend/zoho/zoho.go
+++ b/backend/zoho/zoho.go
@@ -4,6 +4,7 @@ package zoho
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,7 +15,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
@@ -81,7 +81,7 @@ func init() {
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
- return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
+ return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
}
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
@@ -100,13 +100,13 @@ func init() {
// it's own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
- return nil, errors.Wrap(err, "failed to read token")
+ return nil, fmt.Errorf("failed to read token: %w", err)
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
- return nil, errors.Wrap(err, "failed to configure token")
+ return nil, fmt.Errorf("failed to configure token: %w", err)
}
}
@@ -478,7 +478,7 @@ OUTER:
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return found, errors.Wrap(err, "couldn't list files")
+ return found, fmt.Errorf("couldn't list files: %w", err)
}
if len(result.Items) == 0 {
break
@@ -670,7 +670,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
if err != nil {
- return nil, errors.Wrap(err, "failed to make multipart upload")
+ return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}
contentLength := overhead + size
@@ -692,7 +692,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "upload error")
+ return nil, fmt.Errorf("upload error: %w", err)
}
if len(uploadResponse.Uploads) != 1 {
return nil, errors.New("upload: invalid response")
@@ -774,7 +774,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return errors.Wrap(err, "delete object failed")
+ return fmt.Errorf("delete object failed: %w", err)
}
return nil
}
@@ -801,7 +801,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
err = f.deleteObject(ctx, rootID)
if err != nil {
- return errors.Wrap(err, "rmdir failed")
+ return fmt.Errorf("rmdir failed: %w", err)
}
f.dirCache.FlushDir(dir)
return nil
@@ -844,7 +844,7 @@ func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err e
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "rename failed")
+ return nil, fmt.Errorf("rename failed: %w", err)
}
return &result.Item, nil
}
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "couldn't copy file")
+ return nil, fmt.Errorf("couldn't copy file: %w", err)
}
// Server acts weird some times make sure we actually got
// an item
@@ -911,7 +911,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// the correct name after copy
if f.opt.Enc.ToStandardName(result.Items[0].Attributes.Name) != leaf {
if err = dstObject.rename(ctx, leaf); err != nil {
- return nil, errors.Wrap(err, "copy: couldn't rename copied file")
+ return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
}
}
return dstObject, nil
@@ -942,7 +942,7 @@ func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item,
return shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, errors.Wrap(err, "move failed")
+ return nil, fmt.Errorf("move failed: %w", err)
}
// Server acts weird some times make sure our array actually contains
// a file
@@ -992,7 +992,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if needRename && needMove {
tmpLeaf := "rcloneTemp" + random.String(8)
if err = srcObj.rename(ctx, tmpLeaf); err != nil {
- return nil, errors.Wrap(err, "move: pre move rename failed")
+ return nil, fmt.Errorf("move: pre move rename failed: %w", err)
}
}
@@ -1012,7 +1012,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// rename the leaf to its final name
if needRename {
if err = dstObject.rename(ctx, dstLeaf); err != nil {
- return nil, errors.Wrap(err, "move: couldn't rename moved file")
+ return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
}
}
return dstObject, nil
@@ -1046,7 +1046,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// do the move
_, err = f.move(ctx, srcID, dstDirectoryID)
if err != nil {
- return errors.Wrap(err, "couldn't dir move")
+ return fmt.Errorf("couldn't dir move: %w", err)
}
// Can't copy and change name in one step so we have to check if we have
@@ -1054,7 +1054,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if srcLeaf != dstLeaf {
_, err = f.rename(ctx, srcID, dstLeaf)
if err != nil {
- return errors.Wrap(err, "dirmove: couldn't rename moved dir")
+ return fmt.Errorf("dirmove: couldn't rename moved dir: %w", err)
}
}
srcFs.dirCache.FlushDir(srcRemote)
@@ -1261,7 +1261,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// upload was successfull, need to delete old object before rename
if err = o.Remove(ctx); err != nil {
- return errors.Wrap(err, "failed to remove old object")
+ return fmt.Errorf("failed to remove old object: %w", err)
}
if err = o.setMetaData(info); err != nil {
return err
diff --git a/bin/not-in-stable.go b/bin/not-in-stable.go
old mode 100755
new mode 100644
diff --git a/cmd/about/about.go b/cmd/about/about.go
index 2e13fd126..fd59db672 100644
--- a/cmd/about/about.go
+++ b/cmd/about/about.go
@@ -3,10 +3,10 @@ package about
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
@@ -98,11 +98,11 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
cmd.Run(false, false, command, func() error {
doAbout := f.Features().About
if doAbout == nil {
- return errors.Errorf("%v doesn't support about", f)
+ return fmt.Errorf("%v doesn't support about", f)
}
u, err := doAbout(context.Background())
if err != nil {
- return errors.Wrap(err, "About call failed")
+ return fmt.Errorf("About call failed: %w", err)
}
if u == nil {
return errors.New("nil usage returned")
diff --git a/cmd/backend/backend.go b/cmd/backend/backend.go
index 195758991..07f7cba57 100644
--- a/cmd/backend/backend.go
+++ b/cmd/backend/backend.go
@@ -7,7 +7,6 @@ import (
"os"
"sort"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/rc"
"github.com/rclone/rclone/fs"
@@ -88,14 +87,14 @@ Note to run these commands on a running backend then see
default:
doCommand := f.Features().Command
if doCommand == nil {
- return errors.Errorf("%v: doesn't support backend commands", f)
+ return fmt.Errorf("%v: doesn't support backend commands", f)
}
arg := args[2:]
opt := rc.ParseOptions(options)
out, err = doCommand(context.Background(), name, arg, opt)
}
if err != nil {
- return errors.Wrapf(err, "command %q failed", name)
+ return fmt.Errorf("command %q failed: %w", name, err)
}
// Output the result
@@ -121,7 +120,7 @@ Note to run these commands on a running backend then see
enc.SetIndent("", "\t")
err = enc.Encode(out)
if err != nil {
- return errors.Wrap(err, "failed to write JSON")
+ return fmt.Errorf("failed to write JSON: %w", err)
}
}
return nil
@@ -135,7 +134,7 @@ func showHelp(fsInfo *fs.RegInfo) error {
cmds := fsInfo.CommandHelp
name := fsInfo.Name
if len(cmds) == 0 {
- return errors.Errorf("%s backend has no commands", name)
+ return fmt.Errorf("%s backend has no commands", name)
}
fmt.Printf("## Backend commands\n\n")
fmt.Printf(`Here are the commands specific to the %s backend.
diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go
index 9f88970ed..52417b490 100644
--- a/cmd/bisync/bisync_test.go
+++ b/cmd/bisync/bisync_test.go
@@ -7,6 +7,7 @@ package bisync_test
import (
"bytes"
"context"
+ "errors"
"flag"
"fmt"
"io/ioutil"
@@ -36,7 +37,6 @@ import (
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/random"
- "github.com/pkg/errors"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -541,7 +541,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
case "bisync":
return b.runBisync(ctx, args[1:])
default:
- return errors.Errorf("unknown command: %q", args[0])
+ return fmt.Errorf("unknown command: %q", args[0])
}
}
@@ -635,7 +635,7 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
fs1 = addSubdir(b.path1, val)
fs2 = addSubdir(b.path2, val)
default:
- return errors.Errorf("invalid bisync option %q", arg)
+ return fmt.Errorf("invalid bisync option %q", arg)
}
}
@@ -793,12 +793,12 @@ func touchFiles(ctx context.Context, dateStr string, f fs.Fs, dir, glob string)
date, err := time.ParseInLocation(touchDateFormat, dateStr, bisync.TZ)
if err != nil {
- return files, errors.Wrapf(err, "invalid date %q", dateStr)
+ return files, fmt.Errorf("invalid date %q: %w", dateStr, err)
}
matcher, firstErr := filter.GlobToRegexp(glob, false)
if firstErr != nil {
- return files, errors.Errorf("invalid glob %q", glob)
+ return files, fmt.Errorf("invalid glob %q", glob)
}
entries, firstErr := f.List(ctx, "")
diff --git a/cmd/bisync/cmd.go b/cmd/bisync/cmd.go
index fbcb27d11..42b6489bf 100644
--- a/cmd/bisync/cmd.go
+++ b/cmd/bisync/cmd.go
@@ -6,6 +6,8 @@ import (
"context"
"crypto/md5"
"encoding/hex"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -21,7 +23,6 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -82,7 +83,7 @@ func (x *CheckSyncMode) Set(s string) error {
case "only":
*x = CheckSyncOnly
default:
- return errors.Errorf("unknown check-sync mode for bisync: %q", s)
+ return fmt.Errorf("unknown check-sync mode for bisync: %q", s)
}
return nil
}
@@ -184,7 +185,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
f, err := os.Open(filtersFile)
if err != nil {
- return ctx, errors.Errorf("specified filters file does not exist: %s", filtersFile)
+ return ctx, fmt.Errorf("specified filters file does not exist: %s", filtersFile)
}
fs.Infof(nil, "Using filters file %s", filtersFile)
@@ -199,11 +200,11 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
hashFile := filtersFile + ".md5"
wantHash, err := ioutil.ReadFile(hashFile)
if err != nil && !opt.Resync {
- return ctx, errors.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
+ return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
}
if gotHash != string(wantHash) && !opt.Resync {
- return ctx, errors.Errorf("filters file has changed (must run --resync): %s", filtersFile)
+ return ctx, fmt.Errorf("filters file has changed (must run --resync): %s", filtersFile)
}
if opt.Resync {
@@ -218,7 +219,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
filterOpt.FilterFrom = append([]string{filtersFile}, filterOpt.FilterFrom...)
newFilter, err := filter.NewFilter(&filterOpt)
if err != nil {
- return ctx, errors.Wrapf(err, "invalid filters file: %s", filtersFile)
+ return ctx, fmt.Errorf("invalid filters file: %s: %w", filtersFile, err)
}
return filter.ReplaceConfig(ctx, newFilter), nil
diff --git a/cmd/bisync/deltas.go b/cmd/bisync/deltas.go
index 0e09f15f2..ee66980c4 100644
--- a/cmd/bisync/deltas.go
+++ b/cmd/bisync/deltas.go
@@ -4,10 +4,10 @@ package bisync
import (
"context"
+ "fmt"
"path/filepath"
"sort"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
@@ -201,7 +201,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
b.indent("!WARNING", file, "New or changed in both paths")
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
- err = errors.Wrapf(err, "path1 rename failed for %s", p1)
+ err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
b.critical = true
return
}
@@ -210,7 +210,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
- err = errors.Wrapf(err, "path2 rename failed for %s", file)
+ err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
return
}
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
diff --git a/cmd/bisync/listing.go b/cmd/bisync/listing.go
index 3b08bc482..610823c64 100644
--- a/cmd/bisync/listing.go
+++ b/cmd/bisync/listing.go
@@ -14,7 +14,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
@@ -246,7 +245,7 @@ func parseHash(str string) (string, string, error) {
return name, val, nil
}
}
- return "", "", errors.Errorf("invalid hash %q", str)
+ return "", "", fmt.Errorf("invalid hash %q", str)
}
// makeListing will produce listing from directory tree and write it to a file
@@ -301,5 +300,5 @@ func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
}
fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing)
b.critical = true
- return errors.Errorf("empty %s listing: %s", msg, listing)
+ return fmt.Errorf("empty %s listing: %s", msg, listing)
}
diff --git a/cmd/bisync/operations.go b/cmd/bisync/operations.go
index e7d853cd1..d32153edc 100644
--- a/cmd/bisync/operations.go
+++ b/cmd/bisync/operations.go
@@ -5,13 +5,14 @@ package bisync
import (
"context"
+ "errors"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
gosync "sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
@@ -60,10 +61,10 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
}
if b.workDir, err = filepath.Abs(opt.Workdir); err != nil {
- return errors.Wrap(err, "failed to make workdir absolute")
+ return fmt.Errorf("failed to make workdir absolute: %w", err)
}
if err = os.MkdirAll(b.workDir, os.ModePerm); err != nil {
- return errors.Wrap(err, "failed to create workdir")
+ return fmt.Errorf("failed to create workdir: %w", err)
}
// Produce a unique name for the sync operation
@@ -76,12 +77,12 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
if !opt.DryRun {
lockFile = b.basePath + ".lck"
if bilib.FileExists(lockFile) {
- return errors.Errorf("prior lock file found: %s", lockFile)
+ return fmt.Errorf("prior lock file found: %s", lockFile)
}
pidStr := []byte(strconv.Itoa(os.Getpid()))
if err = ioutil.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
- return errors.Wrapf(err, "cannot create lock file: %s", lockFile)
+ return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
}
fs.Debugf(nil, "Lock file created: %s", lockFile)
}
@@ -394,11 +395,11 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
func (b *bisyncRun) checkSync(listing1, listing2 string) error {
files1, err := b.loadListing(listing1)
if err != nil {
- return errors.Wrap(err, "cannot read prior listing of Path1")
+ return fmt.Errorf("cannot read prior listing of Path1: %w", err)
}
files2, err := b.loadListing(listing2)
if err != nil {
- return errors.Wrap(err, "cannot read prior listing of Path2")
+ return fmt.Errorf("cannot read prior listing of Path2: %w", err)
}
ok := true
diff --git a/cmd/bisync/rc.go b/cmd/bisync/rc.go
index fe74af052..04f723f03 100644
--- a/cmd/bisync/rc.go
+++ b/cmd/bisync/rc.go
@@ -2,9 +2,9 @@ package bisync
import (
"context"
+ "errors"
"log"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
diff --git a/cmd/cachestats/cachestats.go b/cmd/cachestats/cachestats.go
index 1daaed37b..16a1398d3 100644
--- a/cmd/cachestats/cachestats.go
+++ b/cmd/cachestats/cachestats.go
@@ -7,7 +7,6 @@ import (
"encoding/json"
"fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
@@ -39,7 +38,7 @@ Print cache stats for a remote in JSON format
fsCache, ok = unwrap().(*cache.Fs)
}
if !ok {
- return errors.Errorf("%s: is not a cache remote", fsrc.Name())
+ return fmt.Errorf("%s: is not a cache remote", fsrc.Name())
}
}
m, err := fsCache.Stats()
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 2de68715a..a9f0e910f 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -8,6 +8,7 @@ package cmd
import (
"context"
+ "errors"
"fmt"
"log"
"os"
@@ -21,7 +22,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -117,7 +117,7 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
f, fileName := NewFsFile(remote)
if fileName != "" {
if !fi.InActive() {
- err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
+ err := fmt.Errorf("Can't limit to single files when using filters: %v", remote)
err = fs.CountError(err)
log.Fatalf(err.Error())
}
@@ -478,16 +478,14 @@ func resolveExitCode(err error) {
os.Exit(exitcode.Success)
}
- _, unwrapped := fserrors.Cause(err)
-
switch {
- case unwrapped == fs.ErrorDirNotFound:
+ case errors.Is(err, fs.ErrorDirNotFound):
os.Exit(exitcode.DirNotFound)
- case unwrapped == fs.ErrorObjectNotFound:
+ case errors.Is(err, fs.ErrorObjectNotFound):
os.Exit(exitcode.FileNotFound)
- case unwrapped == errorUncategorized:
+ case errors.Is(err, errorUncategorized):
os.Exit(exitcode.UncategorizedError)
- case unwrapped == accounting.ErrorMaxTransferLimitReached:
+ case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
os.Exit(exitcode.TransferExceeded)
case fserrors.ShouldRetry(err):
os.Exit(exitcode.RetryError)
diff --git a/cmd/cmount/fs.go b/cmd/cmount/fs.go
index 6545c058d..d627a9829 100644
--- a/cmd/cmount/fs.go
+++ b/cmd/cmount/fs.go
@@ -14,9 +14,9 @@ import (
"time"
"github.com/billziss-gh/cgofuse/fuse"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -569,7 +569,8 @@ func translateError(err error) (errc int) {
if err == nil {
return 0
}
- switch errors.Cause(err) {
+ _, uErr := fserrors.Cause(err)
+ switch uErr {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
diff --git a/cmd/cmount/mount.go b/cmd/cmount/mount.go
index 3070763ba..42b1c51ab 100644
--- a/cmd/cmount/mount.go
+++ b/cmd/cmount/mount.go
@@ -10,6 +10,7 @@
package cmount
import (
+ "errors"
"fmt"
"os"
"runtime"
@@ -18,7 +19,6 @@ import (
"time"
"github.com/billziss-gh/cgofuse/fuse"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
@@ -176,7 +176,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
go func() {
defer func() {
if r := recover(); r != nil {
- errChan <- errors.Errorf("mount failed: %v", r)
+ errChan <- fmt.Errorf("mount failed: %v", r)
}
}()
var err error
@@ -224,7 +224,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
// system didn't blow up before starting
select {
case err := <-errChan:
- err = errors.Wrap(err, "mount stopped before calling Init")
+ err = fmt.Errorf("mount stopped before calling Init: %w", err)
return nil, nil, err
case <-fsys.ready:
}
diff --git a/cmd/cmount/mount_brew.go b/cmd/cmount/mount_brew.go
index e30f4dab6..26531ff7a 100644
--- a/cmd/cmount/mount_brew.go
+++ b/cmd/cmount/mount_brew.go
@@ -7,7 +7,8 @@
package cmount
import (
- "github.com/pkg/errors"
+ "errors"
+
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/vfs"
)
diff --git a/cmd/cmount/mountpoint_other.go b/cmd/cmount/mountpoint_other.go
index aa1f46336..1061d096a 100644
--- a/cmd/cmount/mountpoint_other.go
+++ b/cmd/cmount/mountpoint_other.go
@@ -4,16 +4,17 @@
package cmount
import (
+ "errors"
+ "fmt"
"os"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
)
func getMountpoint(mountPath string, opt *mountlib.Options) (string, error) {
fi, err := os.Stat(mountPath)
if err != nil {
- return "", errors.Wrap(err, "failed to retrieve mount path information")
+ return "", fmt.Errorf("failed to retrieve mount path information: %w", err)
}
if !fi.IsDir() {
return "", errors.New("mount path is not a directory")
diff --git a/cmd/cmount/mountpoint_windows.go b/cmd/cmount/mountpoint_windows.go
index d70bc30af..8a06c88b4 100644
--- a/cmd/cmount/mountpoint_windows.go
+++ b/cmd/cmount/mountpoint_windows.go
@@ -4,11 +4,12 @@
package cmount
import (
+ "fmt"
"os"
+ "errors"
"path/filepath"
"regexp"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/file"
@@ -100,7 +101,7 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
if _, err := os.Stat(mountpath); err == nil {
return "", errors.New("mountpoint path already exists: " + mountpath)
} else if !os.IsNotExist(err) {
- return "", errors.Wrap(err, "failed to retrieve mountpoint path information")
+ return "", fmt.Errorf("failed to retrieve mountpoint path information: %w", err)
}
if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:"
mountpath = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator
@@ -115,14 +116,14 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
}
var err error
if mountpath, err = filepath.Abs(mountpath); err != nil { // Ensures parent is found but also more informative log messages
- return "", errors.Wrap(err, "mountpoint path is not valid: "+mountpath)
+ return "", fmt.Errorf("mountpoint path is not valid: %s: %w", mountpath, err)
}
parent := filepath.Join(mountpath, "..")
if _, err = os.Stat(parent); err != nil {
if os.IsNotExist(err) {
return "", errors.New("parent of mountpoint directory does not exist: " + parent)
}
- return "", errors.Wrap(err, "failed to retrieve mountpoint directory parent information")
+ return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err)
}
}
return mountpath, nil
diff --git a/cmd/config/config.go b/cmd/config/config.go
index 1c9d31ed5..96734e552 100644
--- a/cmd/config/config.go
+++ b/cmd/config/config.go
@@ -3,12 +3,12 @@ package config
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"sort"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -398,11 +398,11 @@ To reconnect use "rclone config reconnect".
f := cmd.NewFsSrc(args)
doDisconnect := f.Features().Disconnect
if doDisconnect == nil {
- return errors.Errorf("%v doesn't support Disconnect", f)
+ return fmt.Errorf("%v doesn't support Disconnect", f)
}
err := doDisconnect(context.Background())
if err != nil {
- return errors.Wrap(err, "Disconnect call failed")
+ return fmt.Errorf("Disconnect call failed: %w", err)
}
return nil
},
@@ -428,11 +428,11 @@ system.
f := cmd.NewFsSrc(args)
doUserInfo := f.Features().UserInfo
if doUserInfo == nil {
- return errors.Errorf("%v doesn't support UserInfo", f)
+ return fmt.Errorf("%v doesn't support UserInfo", f)
}
u, err := doUserInfo(context.Background())
if err != nil {
- return errors.Wrap(err, "UserInfo call failed")
+ return fmt.Errorf("UserInfo call failed: %w", err)
}
if jsonOutput {
out := json.NewEncoder(os.Stdout)
diff --git a/cmd/cryptcheck/cryptcheck.go b/cmd/cryptcheck/cryptcheck.go
index b803beb5d..7ef0e86a4 100644
--- a/cmd/cryptcheck/cryptcheck.go
+++ b/cmd/cryptcheck/cryptcheck.go
@@ -2,8 +2,8 @@ package cryptcheck
import (
"context"
+ "fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/check"
@@ -60,13 +60,13 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
// Check to see fcrypt is a crypt
fcrypt, ok := fdst.(*crypt.Fs)
if !ok {
- return errors.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
+ return fmt.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
}
// Find a hash to use
funderlying := fcrypt.UnWrap()
hashType := funderlying.Hashes().GetOne()
if hashType == hash.None {
- return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
+ return fmt.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
}
fs.Infof(nil, "Using %v for hash comparisons", hashType)
@@ -85,20 +85,20 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
- return true, false, errors.Wrapf(err, "error reading hash from underlying %v", underlyingDst)
+ return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
}
if underlyingHash == "" {
return false, true, nil
}
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
- return true, false, errors.Wrap(err, "error computing hash")
+ return true, false, fmt.Errorf("error computing hash: %w", err)
}
if cryptHash == "" {
return false, true, nil
}
if cryptHash != underlyingHash {
- err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
+ err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Errorf(src, err.Error())
return true, false, nil
}
diff --git a/cmd/deletefile/deletefile.go b/cmd/deletefile/deletefile.go
index 9e8731aa4..88750c75e 100644
--- a/cmd/deletefile/deletefile.go
+++ b/cmd/deletefile/deletefile.go
@@ -2,8 +2,8 @@ package deletefile
import (
"context"
+ "fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -26,7 +26,7 @@ it will always be removed.
fs, fileName := cmd.NewFsFile(args[0])
cmd.Run(true, false, command, func() error {
if fileName == "" {
- return errors.Errorf("%s is a directory or doesn't exist", args[0])
+ return fmt.Errorf("%s is a directory or doesn't exist", args[0])
}
fileObj, err := fs.NewObject(context.Background(), fileName)
if err != nil {
diff --git a/cmd/hashsum/hashsum.go b/cmd/hashsum/hashsum.go
index 509af664c..3988596a4 100644
--- a/cmd/hashsum/hashsum.go
+++ b/cmd/hashsum/hashsum.go
@@ -2,10 +2,10 @@ package hashsum
import (
"context"
+ "errors"
"fmt"
"os"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
@@ -41,7 +41,7 @@ func AddHashFlags(cmdFlags *pflag.FlagSet) {
func GetHashsumOutput(filename string) (out *os.File, close func(), err error) {
out, err = os.Create(filename)
if err != nil {
- err = errors.Wrapf(err, "Failed to open output file %v", filename)
+ err = fmt.Errorf("Failed to open output file %v: %w", filename, err)
return nil, nil, err
}
diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go
index 6ce79a34a..a461a5c9c 100644
--- a/cmd/lsf/lsf.go
+++ b/cmd/lsf/lsf.go
@@ -6,7 +6,6 @@ import (
"io"
"os"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
@@ -200,7 +199,7 @@ func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error {
case 'T':
list.AddTier()
default:
- return errors.Errorf("Unknown format character %q", char)
+ return fmt.Errorf("Unknown format character %q", char)
}
}
diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go
index c2bb8aace..47bafa4e3 100644
--- a/cmd/lsjson/lsjson.go
+++ b/cmd/lsjson/lsjson.go
@@ -6,7 +6,6 @@ import (
"fmt"
"os"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
@@ -125,11 +124,11 @@ can be processed line by line as each item is written one to a line.
}
out, err := json.MarshalIndent(item, "", "\t")
if err != nil {
- return errors.Wrap(err, "failed to marshal list object")
+ return fmt.Errorf("failed to marshal list object: %w", err)
}
_, err = os.Stdout.Write(out)
if err != nil {
- return errors.Wrap(err, "failed to write to output")
+ return fmt.Errorf("failed to write to output: %w", err)
}
fmt.Println()
} else {
@@ -138,7 +137,7 @@ can be processed line by line as each item is written one to a line.
err := operations.ListJSON(context.Background(), fsrc, remote, &opt, func(item *operations.ListJSONItem) error {
out, err := json.Marshal(item)
if err != nil {
- return errors.Wrap(err, "failed to marshal list object")
+ return fmt.Errorf("failed to marshal list object: %w", err)
}
if first {
first = false
@@ -147,7 +146,7 @@ can be processed line by line as each item is written one to a line.
}
_, err = os.Stdout.Write(out)
if err != nil {
- return errors.Wrap(err, "failed to write to output")
+ return fmt.Errorf("failed to write to output: %w", err)
}
return nil
})
diff --git a/cmd/mount/dir.go b/cmd/mount/dir.go
index 9da7a1a64..2acb0c48e 100644
--- a/cmd/mount/dir.go
+++ b/cmd/mount/dir.go
@@ -5,13 +5,13 @@ package mount
import (
"context"
+ "fmt"
"io"
"os"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
@@ -199,7 +199,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
destDir, ok := newDir.(*Dir)
if !ok {
- return errors.Errorf("Unknown Dir type %T", newDir)
+ return fmt.Errorf("Unknown Dir type %T", newDir)
}
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
diff --git a/cmd/mount/fs.go b/cmd/mount/fs.go
index 0ecd4aaa4..8e9dad751 100644
--- a/cmd/mount/fs.go
+++ b/cmd/mount/fs.go
@@ -11,9 +11,9 @@ import (
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -77,7 +77,8 @@ func translateError(err error) error {
if err == nil {
return nil
}
- switch errors.Cause(err) {
+ _, uErr := fserrors.Cause(err)
+ switch uErr {
case vfs.OK:
return nil
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
diff --git a/cmd/mount2/fs.go b/cmd/mount2/fs.go
index 94e70a60c..0ab28f05a 100644
--- a/cmd/mount2/fs.go
+++ b/cmd/mount2/fs.go
@@ -10,9 +10,9 @@ import (
"syscall"
"github.com/hanwen/go-fuse/v2/fuse"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -104,7 +104,8 @@ func translateError(err error) syscall.Errno {
if err == nil {
return 0
}
- switch errors.Cause(err) {
+ _, uErr := fserrors.Cause(err)
+ switch uErr {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
diff --git a/cmd/mountlib/check_linux.go b/cmd/mountlib/check_linux.go
index bad2dee8d..3cdb34690 100644
--- a/cmd/mountlib/check_linux.go
+++ b/cmd/mountlib/check_linux.go
@@ -4,12 +4,13 @@
package mountlib
import (
+ "errors"
+ "fmt"
"path/filepath"
"strings"
"time"
"github.com/artyom/mtab"
- "github.com/pkg/errors"
)
const (
@@ -25,16 +26,16 @@ func CheckMountEmpty(mountpoint string) error {
mountpointAbs, err := filepath.Abs(mountpoint)
if err != nil {
- return errors.Wrapf(err, "cannot get absolute path: %s", mountpoint)
+ return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}
entries, err := mtab.Entries(mtabPath)
if err != nil {
- return errors.Wrapf(err, "cannot read %s", mtabPath)
+ return fmt.Errorf("cannot read %s: %w", mtabPath, err)
}
for _, entry := range entries {
if entry.Dir == mountpointAbs && entry.Type != "autofs" {
- return errors.Errorf(msg, mountpointAbs)
+ return fmt.Errorf(msg, mountpointAbs)
}
}
return nil
@@ -45,11 +46,11 @@ func CheckMountEmpty(mountpoint string) error {
func CheckMountReady(mountpoint string) error {
mountpointAbs, err := filepath.Abs(mountpoint)
if err != nil {
- return errors.Wrapf(err, "cannot get absolute path: %s", mountpoint)
+ return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}
entries, err := mtab.Entries(mtabPath)
if err != nil {
- return errors.Wrapf(err, "cannot read %s", mtabPath)
+ return fmt.Errorf("cannot read %s: %w", mtabPath, err)
}
for _, entry := range entries {
if entry.Dir == mountpointAbs && strings.Contains(entry.Type, "rclone") {
diff --git a/cmd/mountlib/check_other.go b/cmd/mountlib/check_other.go
index cfcbd133c..0b7eeede7 100644
--- a/cmd/mountlib/check_other.go
+++ b/cmd/mountlib/check_other.go
@@ -4,11 +4,11 @@
package mountlib
import (
+ "fmt"
"io"
"os"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -17,7 +17,7 @@ import (
func CheckMountEmpty(mountpoint string) error {
fp, err := os.Open(mountpoint)
if err != nil {
- return errors.Wrapf(err, "Can not open: %s", mountpoint)
+ return fmt.Errorf("Can not open: %s: %w", mountpoint, err)
}
defer fs.CheckClose(fp, &err)
@@ -28,9 +28,9 @@ func CheckMountEmpty(mountpoint string) error {
const msg = "Directory is not empty, use --allow-non-empty to mount anyway: %s"
if err == nil {
- return errors.Errorf(msg, mountpoint)
+ return fmt.Errorf(msg, mountpoint)
}
- return errors.Wrapf(err, msg, mountpoint)
+ return fmt.Errorf(msg+": %w", mountpoint, err)
}
// CheckMountReady should check if mountpoint is mounted by rclone.
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index 3a20540ea..60d2661e7 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -2,6 +2,7 @@ package mountlib
import (
"context"
+ "fmt"
"log"
"os"
"runtime"
@@ -21,7 +22,6 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
- "github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -248,7 +248,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
if err != nil {
- return nil, errors.Wrap(err, "failed to mount FUSE fs")
+ return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
}
return nil, nil
}
@@ -277,7 +277,7 @@ func (m *MountPoint) Wait() error {
// Notify systemd
if err := sysdnotify.Ready(); err != nil {
- return errors.Wrap(err, "failed to notify systemd")
+ return fmt.Errorf("failed to notify systemd: %w", err)
}
// Reload VFS cache on SIGHUP
@@ -305,7 +305,7 @@ func (m *MountPoint) Wait() error {
finalise()
if err != nil {
- return errors.Wrap(err, "failed to umount FUSE fs")
+ return fmt.Errorf("failed to umount FUSE fs: %w", err)
}
return nil
}
diff --git a/cmd/mountlib/rc.go b/cmd/mountlib/rc.go
index 1f613da38..2262ecfa8 100644
--- a/cmd/mountlib/rc.go
+++ b/cmd/mountlib/rc.go
@@ -2,12 +2,12 @@ package mountlib
import (
"context"
+ "errors"
"log"
"sort"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
diff --git a/cmd/mountlib/utils.go b/cmd/mountlib/utils.go
index 510bd87aa..673903317 100644
--- a/cmd/mountlib/utils.go
+++ b/cmd/mountlib/utils.go
@@ -1,11 +1,11 @@
package mountlib
import (
+ "fmt"
"path/filepath"
"runtime"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -42,7 +42,7 @@ func (m *MountPoint) CheckOverlap() error {
mountpointAbs := absPath(m.MountPoint)
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
const msg = "mount point %q and directory to be mounted %q mustn't overlap"
- return errors.Errorf(msg, m.MountPoint, m.Fs.Root())
+ return fmt.Errorf(msg, m.MountPoint, m.Fs.Root())
}
return nil
}
diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go
index 38f97d659..2eec715b0 100644
--- a/cmd/ncdu/ncdu.go
+++ b/cmd/ncdu/ncdu.go
@@ -16,7 +16,6 @@ import (
"github.com/atotto/clipboard"
runewidth "github.com/mattn/go-runewidth"
termbox "github.com/nsf/termbox-go"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ncdu/scan"
"github.com/rclone/rclone/fs"
@@ -314,7 +313,7 @@ func (u *UI) Draw() error {
// Plot
err := termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
if err != nil {
- return errors.Wrap(err, "failed to clear screen")
+ return fmt.Errorf("failed to clear screen: %w", err)
}
// Header line
@@ -432,7 +431,7 @@ func (u *UI) Draw() error {
}
err = termbox.Flush()
if err != nil {
- return errors.Wrap(err, "failed to flush screen")
+ return fmt.Errorf("failed to flush screen: %w", err)
}
return nil
}
@@ -742,7 +741,7 @@ func NewUI(f fs.Fs) *UI {
func (u *UI) Show() error {
err := termbox.Init()
if err != nil {
- return errors.Wrap(err, "termbox init")
+ return fmt.Errorf("termbox init: %w", err)
}
defer termbox.Close()
@@ -766,7 +765,7 @@ outer:
//Reset()
err := u.Draw()
if err != nil {
- return errors.Wrap(err, "draw failed")
+ return fmt.Errorf("draw failed: %w", err)
}
var root *scan.Dir
select {
@@ -775,7 +774,7 @@ outer:
u.setCurrentDir(root)
case err := <-errChan:
if err != nil {
- return errors.Wrap(err, "ncdu directory listing")
+ return fmt.Errorf("ncdu directory listing: %w", err)
}
u.listing = false
case <-updated:
diff --git a/cmd/ncdu/scan/scan.go b/cmd/ncdu/scan/scan.go
index 77b15ff8e..292e78426 100644
--- a/cmd/ncdu/scan/scan.go
+++ b/cmd/ncdu/scan/scan.go
@@ -3,10 +3,10 @@ package scan
import (
"context"
+ "fmt"
"path"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
)
@@ -185,7 +185,7 @@ func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) {
var ok bool
parent, ok = parents[parentPath]
if !ok {
- errChan <- errors.Errorf("couldn't find parent for %q", dirPath)
+ errChan <- fmt.Errorf("couldn't find parent for %q", dirPath)
}
}
d := newDir(parent, dirPath, entries, err)
@@ -202,7 +202,7 @@ func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) {
return nil
})
if err != nil {
- errChan <- errors.Wrap(err, "ncdu listing failed")
+ errChan <- fmt.Errorf("ncdu listing failed: %w", err)
}
errChan <- nil
}()
diff --git a/cmd/rc/rc.go b/cmd/rc/rc.go
index 92ae1fdf9..c2eb7a24f 100644
--- a/cmd/rc/rc.go
+++ b/cmd/rc/rc.go
@@ -4,13 +4,13 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
@@ -163,16 +163,16 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
if loopback {
call := rc.Calls.Get(path)
if call == nil {
- return nil, errors.Errorf("method %q not found", path)
+ return nil, fmt.Errorf("method %q not found", path)
}
_, out, err := jobs.NewJob(ctx, call.Fn, in)
if err != nil {
- return nil, errors.Wrap(err, "loopback call failed")
+ return nil, fmt.Errorf("loopback call failed: %w", err)
}
// Reshape (serialize then deserialize) the data so it is in the form expected
err = rc.Reshape(&out, out)
if err != nil {
- return nil, errors.Wrap(err, "loopback reshape failed")
+ return nil, fmt.Errorf("loopback reshape failed: %w", err)
}
return out, nil
}
@@ -182,12 +182,12 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
url += path
data, err := json.Marshal(in)
if err != nil {
- return nil, errors.Wrap(err, "failed to encode JSON")
+ return nil, fmt.Errorf("failed to encode JSON: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(data))
if err != nil {
- return nil, errors.Wrap(err, "failed to make request")
+ return nil, fmt.Errorf("failed to make request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -197,7 +197,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
resp, err := client.Do(req)
if err != nil {
- return nil, errors.Wrap(err, "connection failed")
+ return nil, fmt.Errorf("connection failed: %w", err)
}
defer fs.CheckClose(resp.Body, &err)
@@ -211,19 +211,19 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
bodyString = err.Error()
}
bodyString = strings.TrimSpace(bodyString)
- return nil, errors.Errorf("Failed to read rc response: %s: %s", resp.Status, bodyString)
+ return nil, fmt.Errorf("Failed to read rc response: %s: %s", resp.Status, bodyString)
}
// Parse output
out = make(rc.Params)
err = json.NewDecoder(resp.Body).Decode(&out)
if err != nil {
- return nil, errors.Wrap(err, "failed to decode JSON")
+ return nil, fmt.Errorf("failed to decode JSON: %w", err)
}
// Check we got 200 OK
if resp.StatusCode != http.StatusOK {
- err = errors.Errorf("operation %q failed: %v", path, out["error"])
+ err = fmt.Errorf("operation %q failed: %v", path, out["error"])
}
return out, err
@@ -240,7 +240,7 @@ func run(ctx context.Context, args []string) (err error) {
for _, param := range params {
equals := strings.IndexRune(param, '=')
if equals < 0 {
- return errors.Errorf("no '=' found in parameter %q", param)
+ return fmt.Errorf("no '=' found in parameter %q", param)
}
key, value := param[:equals], param[equals+1:]
in[key] = value
@@ -251,7 +251,7 @@ func run(ctx context.Context, args []string) (err error) {
}
err = json.Unmarshal([]byte(jsonInput), &in)
if err != nil {
- return errors.Wrap(err, "bad --json input")
+ return fmt.Errorf("bad --json input: %w", err)
}
}
if len(options) > 0 {
@@ -268,7 +268,7 @@ func run(ctx context.Context, args []string) (err error) {
if out != nil && !noOutput {
err := rc.WriteJSON(os.Stdout, out)
if err != nil {
- return errors.Wrap(err, "failed to output JSON")
+ return fmt.Errorf("failed to output JSON: %w", err)
}
}
@@ -279,7 +279,7 @@ func run(ctx context.Context, args []string) (err error) {
func list(ctx context.Context) error {
list, err := doCall(ctx, "rc/list", nil)
if err != nil {
- return errors.Wrap(err, "failed to list")
+ return fmt.Errorf("failed to list: %w", err)
}
commands, ok := list["commands"].([]interface{})
if !ok {
diff --git a/cmd/selfupdate/selfupdate.go b/cmd/selfupdate/selfupdate.go
index c87561c45..f15bce83c 100644
--- a/cmd/selfupdate/selfupdate.go
+++ b/cmd/selfupdate/selfupdate.go
@@ -10,6 +10,7 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -22,7 +23,6 @@ import (
"runtime"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/fs"
@@ -125,7 +125,7 @@ func GetVersion(ctx context.Context, beta bool, version string) (newVersion, sit
if strings.Count(newVersion, ".") == 1 {
html, err := downloadFile(ctx, siteURL)
if err != nil {
- return "", siteURL, errors.Wrap(err, "failed to get list of releases")
+ return "", siteURL, fmt.Errorf("failed to get list of releases: %w", err)
}
reSubver := fmt.Sprintf(`href="\./%s\.\d+/"`, regexp.QuoteMeta(newVersion))
allSubvers := regexp.MustCompile(reSubver).FindAllString(string(html), -1)
@@ -154,7 +154,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version)
if err != nil {
- return errors.Wrap(err, "unable to detect new version")
+ return fmt.Errorf("unable to detect new version: %w", err)
}
oldVersion := fs.Version
@@ -179,7 +179,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
// Get the current executable path
executable, err := os.Executable()
if err != nil {
- return errors.Wrap(err, "unable to find executable")
+ return fmt.Errorf("unable to find executable: %w", err)
}
targetFile := opt.Output
@@ -217,7 +217,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
// Download the update as a temporary file
err = downloadUpdate(ctx, opt.Beta, newVersion, siteURL, newFile, "zip")
if err != nil {
- return errors.Wrap(err, "failed to update rclone")
+ return fmt.Errorf("failed to update rclone: %w", err)
}
err = replaceExecutable(targetFile, newFile, savedFile)
@@ -230,7 +230,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error {
tempFile, err := ioutil.TempFile("", "rclone.*."+packageFormat)
if err != nil {
- return errors.Wrap(err, "unable to write temporary package")
+ return fmt.Errorf("unable to write temporary package: %w", err)
}
packageFile := tempFile.Name()
_ = tempFile.Close()
@@ -262,7 +262,7 @@ func replaceExecutable(targetFile, newFile, savedFile string) error {
fileInfo, err := os.Lstat(targetFile)
if err == nil {
if err = os.Chmod(newFile, fileInfo.Mode()); err != nil {
- return errors.Wrap(err, "failed to set permission")
+ return fmt.Errorf("failed to set permission: %w", err)
}
}
@@ -361,7 +361,7 @@ func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, p
if packageFormat == "deb" || packageFormat == "rpm" {
if err := ioutil.WriteFile(newFile, archiveBuf, 0644); err != nil {
- return errors.Wrap(err, "cannot write temporary ."+packageFormat)
+ return fmt.Errorf("cannot write temporary .%s: %w", packageFormat, err)
}
return nil
}
diff --git a/cmd/selfupdate/verify.go b/cmd/selfupdate/verify.go
index 9cc0cf9d5..74f87eab5 100644
--- a/cmd/selfupdate/verify.go
+++ b/cmd/selfupdate/verify.go
@@ -6,10 +6,10 @@ package selfupdate
import (
"bytes"
"context"
+ "errors"
"fmt"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/clearsign"
diff --git a/cmd/serve/dlna/cds.go b/cmd/serve/dlna/cds.go
index 87b676142..f3daf0cd8 100644
--- a/cmd/serve/dlna/cds.go
+++ b/cmd/serve/dlna/cds.go
@@ -3,6 +3,7 @@ package dlna
import (
"context"
"encoding/xml"
+ "errors"
"fmt"
"log"
"net/http"
@@ -15,7 +16,6 @@ import (
"github.com/anacrolix/dms/dlna"
"github.com/anacrolix/dms/upnp"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/dlna/upnpav"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs"
diff --git a/cmd/serve/dlna/data/data.go b/cmd/serve/dlna/data/data.go
index 7e71f0083..8e142b99f 100644
--- a/cmd/serve/dlna/data/data.go
+++ b/cmd/serve/dlna/data/data.go
@@ -4,10 +4,10 @@
package data
import (
+ "fmt"
"io/ioutil"
"text/template"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -15,21 +15,21 @@ import (
func GetTemplate() (tpl *template.Template, err error) {
templateFile, err := Assets.Open("rootDesc.xml.tmpl")
if err != nil {
- return nil, errors.Wrap(err, "get template open")
+ return nil, fmt.Errorf("get template open: %w", err)
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
- return nil, errors.Wrap(err, "get template read")
+ return nil, fmt.Errorf("get template read: %w", err)
}
var templateString = string(templateBytes)
tpl, err = template.New("rootDesc").Parse(templateString)
if err != nil {
- return nil, errors.Wrap(err, "get template parse")
+ return nil, fmt.Errorf("get template parse: %w", err)
}
return
diff --git a/cmd/serve/docker/driver.go b/cmd/serve/docker/driver.go
index eb8207f52..6b1db258c 100644
--- a/cmd/serve/docker/driver.go
+++ b/cmd/serve/docker/driver.go
@@ -3,6 +3,7 @@ package docker
import (
"context"
"encoding/json"
+ "fmt"
"io/ioutil"
"math/rand"
"os"
@@ -13,7 +14,6 @@ import (
"time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
@@ -44,12 +44,12 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
cacheDir := config.GetCacheDir()
err := file.MkdirAll(cacheDir, 0700)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create cache directory: %s", cacheDir)
+ return nil, fmt.Errorf("failed to create cache directory: %s: %w", cacheDir, err)
}
//err = file.MkdirAll(root, 0755)
if err != nil {
- return nil, errors.Wrapf(err, "failed to create mount root: %s", root)
+ return nil, fmt.Errorf("failed to create mount root: %s: %w", root, err)
}
// setup driver state
@@ -72,7 +72,7 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
// restore from saved state
if !forgetState {
if err = drv.restoreState(ctx); err != nil {
- return nil, errors.Wrap(err, "failed to restore state")
+ return nil, fmt.Errorf("failed to restore state: %w", err)
}
}
@@ -89,7 +89,7 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
// notify systemd
if err := sysdnotify.Ready(); err != nil {
- return nil, errors.Wrap(err, "failed to notify systemd")
+ return nil, fmt.Errorf("failed to notify systemd: %w", err)
}
return drv, nil
@@ -323,7 +323,7 @@ func (drv *Driver) saveState() error {
data, err := json.Marshal(state)
if err != nil {
- return errors.Wrap(err, "failed to marshal state")
+ return fmt.Errorf("failed to marshal state: %w", err)
}
ctx := context.Background()
@@ -335,7 +335,7 @@ func (drv *Driver) saveState() error {
}
time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond)
}
- return errors.Wrap(err, "failed to save state")
+ return fmt.Errorf("failed to save state: %w", err)
}
// restoreState recreates volumes from saved driver state
diff --git a/cmd/serve/docker/options.go b/cmd/serve/docker/options.go
index 58c555d65..3b6fb07f1 100644
--- a/cmd/serve/docker/options.go
+++ b/cmd/serve/docker/options.go
@@ -1,6 +1,7 @@
package docker
import (
+ "fmt"
"strconv"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
- "github.com/pkg/errors"
"github.com/spf13/pflag"
)
@@ -62,7 +62,7 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
case "remote", "fs":
p, err := fspath.Parse(str)
if err != nil || p.Name == ":" {
- return errors.Wrapf(err, "cannot parse path %q", str)
+ return fmt.Errorf("cannot parse path %q: %w", str, err)
}
fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
vol.Fs = str
@@ -100,7 +100,7 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
}
fsInfo, err := fs.Find(fsType)
if err != nil {
- return errors.Errorf("unknown filesystem type %q", fsType)
+ return fmt.Errorf("unknown filesystem type %q", fsType)
}
// handle remaining options, override fsOpt
@@ -124,21 +124,21 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
ok = true
}
if err != nil {
- return errors.Wrapf(err, "cannot parse option %q", key)
+ return fmt.Errorf("cannot parse option %q: %w", key, err)
}
if !ok {
// try to use as a mount option in mntOpt
ok, err = getMountOption(mntOpt, opt, key)
if ok && err != nil {
- return errors.Wrapf(err, "cannot parse mount option %q", key)
+ return fmt.Errorf("cannot parse mount option %q: %w", key, err)
}
}
if !ok {
// try as a vfs option in vfsOpt
ok, err = getVFSOption(vfsOpt, opt, key)
if ok && err != nil {
- return errors.Wrapf(err, "cannot parse vfs option %q", key)
+ return fmt.Errorf("cannot parse vfs option %q: %w", key, err)
}
}
@@ -149,11 +149,11 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
hasFsPrefix := optWithPrefix != fsOptName
if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
- return errors.Errorf("unsupported backend option %q", key)
+ return fmt.Errorf("unsupported backend option %q", key)
}
fsOpt[fsOptName], err = opt.GetString(key)
if err != nil {
- return errors.Wrapf(err, "cannot parse backend option %q", key)
+ return fmt.Errorf("cannot parse backend option %q: %w", key, err)
}
}
}
diff --git a/cmd/serve/docker/volume.go b/cmd/serve/docker/volume.go
index 770a5f893..076736684 100644
--- a/cmd/serve/docker/volume.go
+++ b/cmd/serve/docker/volume.go
@@ -2,14 +2,14 @@ package docker
import (
"context"
+ "errors"
+ "fmt"
"os"
"path/filepath"
"runtime"
"sort"
"time"
- "github.com/pkg/errors"
-
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -155,7 +155,7 @@ func (vol *Volume) checkMountpoint() error {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
if err = file.MkdirAll(path, 0700); err != nil {
- return errors.Wrapf(err, "failed to create mountpoint: %s", path)
+ return fmt.Errorf("failed to create mountpoint: %s: %w", path, err)
}
} else if err != nil {
return err
@@ -182,7 +182,7 @@ func (vol *Volume) setup(ctx context.Context) error {
_, mountFn := mountlib.ResolveMountMethod(vol.mountType)
if mountFn == nil {
if vol.mountType != "" {
- return errors.Errorf("unsupported mount type %q", vol.mountType)
+ return fmt.Errorf("unsupported mount type %q", vol.mountType)
}
return errors.New("mount command unsupported by this build")
}
@@ -242,7 +242,7 @@ func (vol *Volume) clearCache() error {
}
root, err := VFS.Root()
if err != nil {
- return errors.Wrapf(err, "error reading root: %v", VFS.Fs())
+ return fmt.Errorf("error reading root: %v: %w", VFS.Fs(), err)
}
root.ForgetAll()
return nil
diff --git a/cmd/serve/ftp/ftp.go b/cmd/serve/ftp/ftp.go
index de7bc4ac6..a2a4df159 100644
--- a/cmd/serve/ftp/ftp.go
+++ b/cmd/serve/ftp/ftp.go
@@ -7,6 +7,7 @@ package ftp
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -16,7 +17,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
diff --git a/cmd/serve/http/data/data.go b/cmd/serve/http/data/data.go
index 461e91663..06eb6a254 100644
--- a/cmd/serve/http/data/data.go
+++ b/cmd/serve/http/data/data.go
@@ -4,11 +4,11 @@
package data
import (
+ "fmt"
"html/template"
"io/ioutil"
"time"
- "github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/rclone/rclone/fs"
@@ -61,14 +61,14 @@ func GetTemplate(tmpl string) (tpl *template.Template, err error) {
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
- return nil, errors.Wrap(err, "get template open")
+ return nil, fmt.Errorf("get template open: %w", err)
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
- return nil, errors.Wrap(err, "get template read")
+ return nil, fmt.Errorf("get template read: %w", err)
}
templateString = string(templateBytes)
@@ -76,7 +76,7 @@ func GetTemplate(tmpl string) (tpl *template.Template, err error) {
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
- return nil, errors.Wrap(err, "get template open")
+ return nil, fmt.Errorf("get template open: %w", err)
}
templateString = string(templateFile)
@@ -87,7 +87,7 @@ func GetTemplate(tmpl string) (tpl *template.Template, err error) {
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
- return nil, errors.Wrap(err, "get template parse")
+ return nil, fmt.Errorf("get template parse: %w", err)
}
return
diff --git a/cmd/serve/httplib/httplib.go b/cmd/serve/httplib/httplib.go
index 40fb62ffb..ae6583af9 100644
--- a/cmd/serve/httplib/httplib.go
+++ b/cmd/serve/httplib/httplib.go
@@ -18,7 +18,6 @@ import (
"time"
auth "github.com/abbot/go-http-auth"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/fs"
)
@@ -322,7 +321,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
func (s *Server) Serve() error {
ln, err := net.Listen("tcp", s.httpServer.Addr)
if err != nil {
- return errors.Wrapf(err, "start server failed")
+ return fmt.Errorf("start server failed: %w", err)
}
s.listener = ln
s.waitChan = make(chan struct{})
diff --git a/cmd/serve/proxy/proxy.go b/cmd/serve/proxy/proxy.go
index e55233fef..861861ee9 100644
--- a/cmd/serve/proxy/proxy.go
+++ b/cmd/serve/proxy/proxy.go
@@ -7,11 +7,12 @@ import (
"crypto/sha256"
"crypto/subtle"
"encoding/json"
+ "errors"
+ "fmt"
"os/exec"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
@@ -144,7 +145,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
inBytes, err := json.MarshalIndent(in, "", "\t")
if err != nil {
- return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
+ return nil, fmt.Errorf("proxy: failed to marshal input: %w", err)
}
var stdout, stderr bytes.Buffer
cmd.Stdin = bytes.NewBuffer(inBytes)
@@ -155,11 +156,11 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
duration := time.Since(start)
if err != nil {
- return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
+ return nil, fmt.Errorf("proxy: failed on %v: %q: %w", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())), err)
}
err = json.Unmarshal(stdout.Bytes(), &config)
if err != nil {
- return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
+ return nil, fmt.Errorf("proxy: failed to read output: %q: %w", string(stdout.Bytes()), err)
}
fs.Debugf(nil, "Proxy returned in %v", duration)
@@ -171,7 +172,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
if ok {
obscuredValue, err := obscure.Obscure(value)
if err != nil {
- return nil, errors.Wrap(err, "proxy")
+ return nil, fmt.Errorf("proxy: %w", err)
}
config.Set(key, obscuredValue)
}
@@ -213,7 +214,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
// Find the backend
fsInfo, err := fs.Find(fsName)
if err != nil {
- return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
+ return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
}
// base name of config on user name. This may appear in logs
@@ -247,7 +248,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
return entry, true, nil
})
if err != nil {
- return nil, errors.Wrapf(err, "proxy: failed to create backend")
+ return nil, fmt.Errorf("proxy: failed to create backend: %w", err)
}
return value, nil
}
@@ -269,7 +270,7 @@ func (p *Proxy) Call(user, auth string, isPublicKey bool) (VFS *vfs.VFS, vfsKey
// check we got what we were expecting
entry, ok := value.(cacheEntry)
if !ok {
- return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
+ return nil, "", fmt.Errorf("proxy: value is not cache entry: %#v", value)
}
// Check the password / public key is correct in the cached entry. This
diff --git a/cmd/serve/restic/restic.go b/cmd/serve/restic/restic.go
index a90b3295a..53143957f 100644
--- a/cmd/serve/restic/restic.go
+++ b/cmd/serve/restic/restic.go
@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
- "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/http/serve"
@@ -386,8 +385,7 @@ func (s *Server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
return nil
})
if err != nil {
- _, err = fserrors.Cause(err)
- if err != fs.ErrorDirNotFound {
+ if !errors.Is(err, fs.ErrorDirNotFound) {
fs.Errorf(remote, "list failed: %#v %T", err, err)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
diff --git a/cmd/serve/sftp/connection.go b/cmd/serve/sftp/connection.go
index 15dc1049c..4575980f8 100644
--- a/cmd/serve/sftp/connection.go
+++ b/cmd/serve/sftp/connection.go
@@ -5,6 +5,7 @@ package sftp
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -12,7 +13,6 @@ import (
"regexp"
"strings"
- "github.com/pkg/errors"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
@@ -74,7 +74,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
}
usage, err := about(ctx)
if err != nil {
- return errors.Wrap(err, "About failed")
+ return fmt.Errorf("About failed: %w", err)
}
total, used, free := int64(-1), int64(-1), int64(-1)
if usage.Total != nil {
@@ -94,7 +94,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
/dev/root %d %d %d %d%% /
`, total, used, free, perc)
if err != nil {
- return errors.Wrap(err, "send output failed")
+ return fmt.Errorf("send output failed: %w", err)
}
case "md5sum", "sha1sum":
ht := hash.MD5
@@ -113,7 +113,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
} else {
node, err := c.vfs.Stat(args)
if err != nil {
- return errors.Wrapf(err, "hash failed finding file %q", args)
+ return fmt.Errorf("hash failed finding file %q: %w", args, err)
}
if node.IsDir() {
return errors.New("can't hash directory")
@@ -124,12 +124,12 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
}
hashSum, err = o.Hash(ctx, ht)
if err != nil {
- return errors.Wrap(err, "hash failed")
+ return fmt.Errorf("hash failed: %w", err)
}
}
_, err = fmt.Fprintf(out, "%s %s\n", hashSum, args)
if err != nil {
- return errors.Wrap(err, "send output failed")
+ return fmt.Errorf("send output failed: %w", err)
}
case "echo":
// special cases for rclone command detection
@@ -138,7 +138,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
if c.vfs.Fs().Hashes().Contains(hash.MD5) {
_, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
if err != nil {
- return errors.Wrap(err, "send output failed")
+ return fmt.Errorf("send output failed: %w", err)
}
} else {
return errors.New("md5 hash not supported")
@@ -147,7 +147,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
if c.vfs.Fs().Hashes().Contains(hash.SHA1) {
_, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
if err != nil {
- return errors.Wrap(err, "send output failed")
+ return fmt.Errorf("send output failed: %w", err)
}
} else {
return errors.New("sha1 hash not supported")
@@ -155,11 +155,11 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
default:
_, err = fmt.Fprintf(out, "%s\n", args)
if err != nil {
- return errors.Wrap(err, "send output failed")
+ return fmt.Errorf("send output failed: %w", err)
}
}
default:
- return errors.Errorf("%q not implemented\n", command)
+ return fmt.Errorf("%q not implemented", command)
}
return nil
}
@@ -268,7 +268,7 @@ func serveChannel(rwc io.ReadWriteCloser, h sftp.Handlers, what string) error {
}()
err := server.Serve()
if err != nil && err != io.EOF {
- return errors.Wrap(err, "completed with error")
+ return fmt.Errorf("completed with error: %w", err)
}
fs.Debugf(what, "exited session")
return nil
diff --git a/cmd/serve/sftp/server.go b/cmd/serve/sftp/server.go
index 79446607d..d1fa92bf8 100644
--- a/cmd/serve/sftp/server.go
+++ b/cmd/serve/sftp/server.go
@@ -15,6 +15,7 @@ import (
"crypto/x509"
"encoding/base64"
"encoding/pem"
+ "errors"
"fmt"
"io/ioutil"
"net"
@@ -22,7 +23,6 @@ import (
"path/filepath"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
@@ -237,7 +237,7 @@ func (s *server) serve() (err error) {
// If loading a cached key failed, make the keys and retry
err = file.MkdirAll(cachePath, 0700)
if err != nil {
- return errors.Wrap(err, "failed to create cache path")
+ return fmt.Errorf("failed to create cache path: %w", err)
}
if strings.HasSuffix(keyPath, string(os.PathSeparator)+"id_rsa") {
const bits = 2048
@@ -250,10 +250,10 @@ func (s *server) serve() (err error) {
fs.Logf(nil, "Generating Ed25519 key pair at %q", keyPath)
err = makeEd25519SSHKeyPair(keyPath+".pub", keyPath)
} else {
- return errors.Errorf("don't know how to generate key pair %q", keyPath)
+ return fmt.Errorf("don't know how to generate key pair %q", keyPath)
}
if err != nil {
- return errors.Wrap(err, "failed to create SSH key pair")
+ return fmt.Errorf("failed to create SSH key pair: %w", err)
}
// reload the new key
private, err = loadPrivateKey(keyPath)
@@ -270,7 +270,7 @@ func (s *server) serve() (err error) {
// accepted.
s.listener, err = net.Listen("tcp", s.opt.ListenAddr)
if err != nil {
- return errors.Wrap(err, "failed to listen for connection")
+ return fmt.Errorf("failed to listen for connection: %w", err)
}
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
@@ -313,11 +313,11 @@ func (s *server) Close() {
func loadPrivateKey(keyPath string) (ssh.Signer, error) {
privateBytes, err := ioutil.ReadFile(keyPath)
if err != nil {
- return nil, errors.Wrap(err, "failed to load private key")
+ return nil, fmt.Errorf("failed to load private key: %w", err)
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse private key")
+ return nil, fmt.Errorf("failed to parse private key: %w", err)
}
return private, nil
}
@@ -328,13 +328,13 @@ func loadPrivateKey(keyPath string) (ssh.Signer, error) {
func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string]struct{}, err error) {
authorizedKeysBytes, err := ioutil.ReadFile(authorizedKeysPath)
if err != nil {
- return nil, errors.Wrap(err, "failed to load authorized keys")
+ return nil, fmt.Errorf("failed to load authorized keys: %w", err)
}
authorizedKeysMap = make(map[string]struct{})
for len(authorizedKeysBytes) > 0 {
pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse authorized keys")
+ return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
}
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go
index 531842936..a618c6c06 100644
--- a/cmd/serve/webdav/webdav.go
+++ b/cmd/serve/webdav/webdav.go
@@ -3,6 +3,8 @@ package webdav
import (
"context"
+ "errors"
+ "fmt"
"net/http"
"os"
"strings"
@@ -16,7 +18,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
- "github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -157,7 +158,7 @@ func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
}
VFS, ok := value.(*vfs.VFS)
if !ok {
- return nil, errors.Errorf("context value is not VFS: %#v", value)
+ return nil, fmt.Errorf("context value is not VFS: %#v", value)
}
return VFS, nil
}
diff --git a/cmd/settier/settier.go b/cmd/settier/settier.go
index da8e54d59..97953727a 100644
--- a/cmd/settier/settier.go
+++ b/cmd/settier/settier.go
@@ -2,8 +2,8 @@ package settier
import (
"context"
+ "fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -47,7 +47,7 @@ Or just provide remote directory and all files in directory will be tiered
cmd.Run(false, false, command, func() error {
isSupported := fsrc.Features().SetTier
if !isSupported {
- return errors.Errorf("Remote %s does not support settier", fsrc.Name())
+ return fmt.Errorf("Remote %s does not support settier", fsrc.Name())
}
return operations.SetTier(context.Background(), fsrc, tier)
diff --git a/cmd/test/info/info.go b/cmd/test/info/info.go
index 6b6b35f07..06cdd74e9 100644
--- a/cmd/test/info/info.go
+++ b/cmd/test/info/info.go
@@ -19,7 +19,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/test"
"github.com/rclone/rclone/cmd/test/info/internal"
@@ -441,7 +440,7 @@ func (r *results) checkStreaming() {
func readInfo(ctx context.Context, f fs.Fs) error {
err := f.Mkdir(ctx, "")
if err != nil {
- return errors.Wrap(err, "couldn't mkdir")
+ return fmt.Errorf("couldn't mkdir: %w", err)
}
r := newResults(ctx, f)
if checkControl {
diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go
index e778fbc7f..34ea22321 100644
--- a/cmd/touch/touch.go
+++ b/cmd/touch/touch.go
@@ -3,9 +3,10 @@ package touch
import (
"bytes"
"context"
+ "errors"
+ "fmt"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
@@ -89,7 +90,7 @@ func timeOfTouch() (time.Time, error) {
if timeAsArgument != "" {
var err error
if t, err = parseTimeArgument(timeAsArgument); err != nil {
- return t, errors.Wrap(err, "failed to parse timestamp argument")
+ return t, fmt.Errorf("failed to parse timestamp argument: %w", err)
}
} else {
t = time.Now()
@@ -114,7 +115,7 @@ func Touch(ctx context.Context, f fs.Fs, fileName string) error {
fs.Debugf(nil, "Touch time %v", t)
file, err := f.NewObject(ctx, fileName)
if err != nil {
- if errors.Cause(err) == fs.ErrorObjectNotFound {
+ if errors.Is(err, fs.ErrorObjectNotFound) {
// Touch single non-existent file
if notCreateNewFile {
fs.Logf(f, "Not touching non-existent file due to --no-create")
@@ -129,10 +130,10 @@ func Touch(ctx context.Context, f fs.Fs, fileName string) error {
}
fs.Debugf(f, "Touching (creating)")
if err = createEmptyObject(ctx, fileName, t, f); err != nil {
- return errors.Wrap(err, "failed to touch (create)")
+ return fmt.Errorf("failed to touch (create): %w", err)
}
}
- if errors.Cause(err) == fs.ErrorIsDir {
+ if errors.Is(err, fs.ErrorIsDir) {
if recursive {
// Touch existing directory, recursive
fs.Debugf(nil, "Touching files in directory recursively")
@@ -149,7 +150,7 @@ func Touch(ctx context.Context, f fs.Fs, fileName string) error {
fs.Debugf(f, "Touching %q", fileName)
err = file.SetModTime(ctx, t)
if err != nil {
- return errors.Wrap(err, "failed to touch")
+ return fmt.Errorf("failed to touch: %w", err)
}
}
return nil
diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go
index 59eaef48e..52fa1d84c 100644
--- a/cmd/tree/tree.go
+++ b/cmd/tree/tree.go
@@ -11,7 +11,6 @@ import (
"time"
"github.com/a8m/tree"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
@@ -100,7 +99,7 @@ short options as they conflict with rclone's short options.
var err error
outFile, err = os.Create(outFileName)
if err != nil {
- return errors.Errorf("failed to create output file: %v", err)
+ return fmt.Errorf("failed to create output file: %v", err)
}
}
opts.VerSort = opts.VerSort || sort == "version"
@@ -207,7 +206,7 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
}
_, entry := dirtree.DirTree(dirs).Find(filePath)
if entry == nil {
- return nil, errors.Errorf("Couldn't find %q in directory cache", filePath)
+ return nil, fmt.Errorf("Couldn't find %q in directory cache", filePath)
}
return &FileInfo{entry}, nil
}
@@ -219,7 +218,7 @@ func (dirs Fs) ReadDir(dir string) (names []string, err error) {
dir = strings.TrimLeft(dir, "/")
entries, ok := dirs[dir]
if !ok {
- return nil, errors.Errorf("Couldn't find directory %q", dir)
+ return nil, fmt.Errorf("Couldn't find directory %q", dir)
}
for _, entry := range entries {
names = append(names, path.Base(entry.Remote()))
diff --git a/cmd/version/version.go b/cmd/version/version.go
index 4ccca007a..3ec69e135 100644
--- a/cmd/version/version.go
+++ b/cmd/version/version.go
@@ -1,6 +1,7 @@
package version
import (
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -8,7 +9,6 @@ import (
"time"
"github.com/coreos/go-semver/semver"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go
index ef079e36c..e3ed4972c 100644
--- a/fs/accounting/accounting.go
+++ b/fs/accounting/accounting.go
@@ -3,6 +3,7 @@ package accounting
import (
"context"
+ "errors"
"fmt"
"io"
"sync"
@@ -11,7 +12,6 @@ import (
"github.com/rclone/rclone/fs/rc"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/asyncreader"
"github.com/rclone/rclone/fs/fserrors"
diff --git a/fs/accounting/stats_test.go b/fs/accounting/stats_test.go
index 3359e3a7b..c22f0031b 100644
--- a/fs/accounting/stats_test.go
+++ b/fs/accounting/stats_test.go
@@ -7,7 +7,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/stretchr/testify/assert"
@@ -104,7 +103,7 @@ func TestStatsError(t *testing.T) {
assert.Equal(t, t0, s.RetryAfter())
assert.Equal(t, e, s.GetLastError())
- err := errors.Wrap(fserrors.ErrorRetryAfter(t1), "potato")
+ err := fmt.Errorf("potato: %w", fserrors.ErrorRetryAfter(t1))
err = s.Error(err)
assert.Equal(t, int64(3), s.GetErrors())
assert.False(t, s.HadFatalError())
diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go
index 5d7c87fba..c875c568f 100644
--- a/fs/accounting/token_bucket.go
+++ b/fs/accounting/token_bucket.go
@@ -2,10 +2,11 @@ package accounting
import (
"context"
+ "errors"
+ "fmt"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"golang.org/x/time/rate"
@@ -201,7 +202,7 @@ func (tb *tokenBucket) rcBwlimit(ctx context.Context, in rc.Params) (out rc.Para
var bws fs.BwTimetable
err = bws.Set(bwlimit)
if err != nil {
- return out, errors.Wrap(err, "bad bwlimit")
+ return out, fmt.Errorf("bad bwlimit: %w", err)
}
if len(bws) != 1 {
return out, errors.New("need exactly 1 bandwidth setting")
diff --git a/fs/asyncreader/asyncreader.go b/fs/asyncreader/asyncreader.go
index 92d389002..44f8fb245 100644
--- a/fs/asyncreader/asyncreader.go
+++ b/fs/asyncreader/asyncreader.go
@@ -4,11 +4,11 @@ package asyncreader
import (
"context"
+ "errors"
"io"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
diff --git a/fs/backend_config.go b/fs/backend_config.go
index 97db87202..8d24f523d 100644
--- a/fs/backend_config.go
+++ b/fs/backend_config.go
@@ -6,11 +6,11 @@ package fs
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
)
@@ -188,7 +188,7 @@ func ConfigConfirm(state string, Default bool, name string, help string) (*Confi
// If there is only one item it will short cut to the next state
func ConfigChooseFixed(state string, name string, help string, items []OptionExample) (*ConfigOut, error) {
if len(items) == 0 {
- return nil, errors.Errorf("no items found in: %s", help)
+ return nil, fmt.Errorf("no items found in: %s", help)
}
choose := &ConfigOut{
State: state,
@@ -323,7 +323,7 @@ func configAll(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo
if stateNumber != "" {
optionNumber, err = strconv.Atoi(stateNumber)
if err != nil {
- return nil, errors.Wrap(err, "internal error: bad state number")
+ return nil, fmt.Errorf("internal error: bad state number: %w", err)
}
}
@@ -393,7 +393,7 @@ func configAll(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo
}
return ConfigGoto("*postconfig")
}
- return nil, errors.Errorf("internal error: bad state %q", state)
+ return nil, fmt.Errorf("internal error: bad state %q", state)
}
func backendConfigStep(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, choices configmap.Getter, in ConfigIn) (out *ConfigOut, err error) {
@@ -415,7 +415,7 @@ func backendConfigStep(ctx context.Context, name string, m configmap.Mapper, ri
in.State = ""
return backendConfigStep(ctx, name, m, ri, choices, in)
case strings.HasPrefix(in.State, "*"):
- err = errors.Errorf("unknown internal state %q", in.State)
+ err = fmt.Errorf("unknown internal state %q", in.State)
default:
// Otherwise pass to backend
if ri.Config == nil {
diff --git a/fs/bwtimetable.go b/fs/bwtimetable.go
index f0056e15d..5cf76ee26 100644
--- a/fs/bwtimetable.go
+++ b/fs/bwtimetable.go
@@ -2,12 +2,11 @@ package fs
import (
"encoding/json"
+ "errors"
"fmt"
"strconv"
"strings"
"time"
-
- "github.com/pkg/errors"
)
// BwPair represents an upload and a download bandwidth
@@ -84,21 +83,21 @@ func (x BwTimetable) String() string {
// Basic hour format checking
func validateHour(HHMM string) error {
if len(HHMM) != 5 {
- return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
+ return fmt.Errorf("invalid time specification (hh:mm): %q", HHMM)
}
hh, err := strconv.Atoi(HHMM[0:2])
if err != nil {
- return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
+ return fmt.Errorf("invalid hour in time specification %q: %v", HHMM, err)
}
if hh < 0 || hh > 23 {
- return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
+ return fmt.Errorf("invalid hour (must be between 00 and 23): %q", hh)
}
mm, err := strconv.Atoi(HHMM[3:])
if err != nil {
- return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
+ return fmt.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
}
if mm < 0 || mm > 59 {
- return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
+ return fmt.Errorf("invalid minute (must be between 00 and 59): %q", mm)
}
return nil
}
@@ -127,7 +126,7 @@ func parseWeekday(dayOfWeek string) (int, error) {
if dayOfWeek == "sat" || dayOfWeek == "saturday" {
return 6, nil
}
- return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
+ return 0, fmt.Errorf("invalid weekday: %q", dayOfWeek)
}
// Set the bandwidth timetable.
@@ -156,7 +155,7 @@ func (x *BwTimetable) Set(s string) error {
// Format must be dayOfWeek-HH:MM,BW
if len(tv) != 2 {
- return errors.Errorf("invalid time/bandwidth specification: %q", tok)
+ return fmt.Errorf("invalid time/bandwidth specification: %q", tok)
}
weekday := 0
@@ -181,7 +180,7 @@ func (x *BwTimetable) Set(s string) error {
} else {
timespec := strings.Split(tv[0], "-")
if len(timespec) != 2 {
- return errors.Errorf("invalid time specification: %q", tv[0])
+ return fmt.Errorf("invalid time specification: %q", tv[0])
}
var err error
weekday, err = parseWeekday(timespec[0])
diff --git a/fs/config.go b/fs/config.go
index 0d43e04f9..480e0a39b 100644
--- a/fs/config.go
+++ b/fs/config.go
@@ -2,13 +2,12 @@ package fs
import (
"context"
+ "errors"
"net"
"os"
"strconv"
"strings"
"time"
-
- "github.com/pkg/errors"
)
// Global
diff --git a/fs/config/authorize.go b/fs/config/authorize.go
index 452e8f8d4..fbfea2e0c 100644
--- a/fs/config/authorize.go
+++ b/fs/config/authorize.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
)
@@ -22,7 +21,7 @@ func Authorize(ctx context.Context, args []string, noAutoBrowser bool) error {
switch len(args) {
case 1, 2, 3:
default:
- return errors.Errorf("invalid number of arguments: %d", len(args))
+ return fmt.Errorf("invalid number of arguments: %d", len(args))
}
Type := args[0] // FIXME could read this from input
ri, err := fs.Find(Type)
@@ -30,7 +29,7 @@ func Authorize(ctx context.Context, args []string, noAutoBrowser bool) error {
return err
}
if ri.Config == nil {
- return errors.Errorf("can't authorize fs %q", Type)
+ return fmt.Errorf("can't authorize fs %q", Type)
}
// Config map for remote
diff --git a/fs/config/config.go b/fs/config/config.go
index a2db8ddfd..a4f8a1644 100644
--- a/fs/config/config.go
+++ b/fs/config/config.go
@@ -4,6 +4,7 @@ package config
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"log"
mathrand "math/rand"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/mitchellh/go-homedir"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -447,7 +447,7 @@ func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt Upd
ri, err := fs.Find(fsType)
if err != nil {
- return nil, errors.Errorf("couldn't find backend for type %q", fsType)
+ return nil, fmt.Errorf("couldn't find backend for type %q", fsType)
}
// Work out which options need to be obscured
@@ -474,7 +474,7 @@ func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt Upd
// or we are forced to obscure
vStr, err = obscure.Obscure(vStr)
if err != nil {
- return nil, errors.Wrap(err, "UpdateRemote: obscure failed")
+ return nil, fmt.Errorf("UpdateRemote: obscure failed: %w", err)
}
}
}
@@ -558,11 +558,11 @@ func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error
func JSONListProviders() error {
b, err := json.MarshalIndent(fs.Registry, "", " ")
if err != nil {
- return errors.Wrap(err, "failed to marshal examples")
+ return fmt.Errorf("failed to marshal examples: %w", err)
}
_, err = os.Stdout.Write(b)
if err != nil {
- return errors.Wrap(err, "failed to write providers list")
+ return fmt.Errorf("failed to write providers list: %w", err)
}
return nil
}
@@ -660,11 +660,11 @@ func Dump() error {
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
- return errors.Wrap(err, "failed to marshal config dump")
+ return fmt.Errorf("failed to marshal config dump: %w", err)
}
_, err = os.Stdout.Write(b)
if err != nil {
- return errors.Wrap(err, "failed to write config dump")
+ return fmt.Errorf("failed to write config dump: %w", err)
}
return nil
}
diff --git a/fs/config/configfile/configfile.go b/fs/config/configfile/configfile.go
index 6b93d4943..65971492d 100644
--- a/fs/config/configfile/configfile.go
+++ b/fs/config/configfile/configfile.go
@@ -3,6 +3,7 @@ package configfile
import (
"bytes"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -10,7 +11,6 @@ import (
"sync"
"github.com/Unknwon/goconfig"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/file"
@@ -106,17 +106,17 @@ func (s *Storage) Save() error {
configPath := config.GetConfigPath()
if configPath == "" {
- return errors.Errorf("Failed to save config file: Path is empty")
+ return fmt.Errorf("Failed to save config file: Path is empty")
}
dir, name := filepath.Split(configPath)
err := file.MkdirAll(dir, os.ModePerm)
if err != nil {
- return errors.Wrap(err, "failed to create config directory")
+ return fmt.Errorf("failed to create config directory: %w", err)
}
f, err := ioutil.TempFile(dir, name)
if err != nil {
- return errors.Errorf("Failed to create temp file for new config: %v", err)
+ return fmt.Errorf("Failed to create temp file for new config: %v", err)
}
defer func() {
_ = f.Close()
@@ -127,7 +127,7 @@ func (s *Storage) Save() error {
var buf bytes.Buffer
if err := goconfig.SaveConfigData(s.gc, &buf); err != nil {
- return errors.Errorf("Failed to save config file: %v", err)
+ return fmt.Errorf("Failed to save config file: %v", err)
}
if err := config.Encrypt(&buf, f); err != nil {
@@ -137,7 +137,7 @@ func (s *Storage) Save() error {
_ = f.Sync()
err = f.Close()
if err != nil {
- return errors.Errorf("Failed to close config file: %v", err)
+ return fmt.Errorf("Failed to close config file: %v", err)
}
var fileMode os.FileMode = 0600
@@ -157,10 +157,10 @@ func (s *Storage) Save() error {
}
if err = os.Rename(configPath, configPath+".old"); err != nil && !os.IsNotExist(err) {
- return errors.Errorf("Failed to move previous config to backup location: %v", err)
+ return fmt.Errorf("Failed to move previous config to backup location: %v", err)
}
if err = os.Rename(f.Name(), configPath); err != nil {
- return errors.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
+ return fmt.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
}
if err := os.Remove(configPath + ".old"); err != nil && !os.IsNotExist(err) {
fs.Errorf(nil, "Failed to remove backup config file: %v", err)
@@ -177,7 +177,7 @@ func (s *Storage) Serialize() (string, error) {
s.check()
var buf bytes.Buffer
if err := goconfig.SaveConfigData(s.gc, &buf); err != nil {
- return "", errors.Errorf("Failed to save config file: %v", err)
+ return "", fmt.Errorf("Failed to save config file: %v", err)
}
return buf.String(), nil
diff --git a/fs/config/configmap/configmap.go b/fs/config/configmap/configmap.go
index a37a2e654..516a91992 100644
--- a/fs/config/configmap/configmap.go
+++ b/fs/config/configmap/configmap.go
@@ -4,11 +4,10 @@ package configmap
import (
"encoding/base64"
"encoding/json"
+ "fmt"
"sort"
"strings"
"unicode"
-
- "github.com/pkg/errors"
)
// Priority of getters
@@ -172,7 +171,7 @@ func (c Simple) Encode() (string, error) {
}
buf, err := json.Marshal(c)
if err != nil {
- return "", errors.Wrap(err, "encode simple map")
+ return "", fmt.Errorf("encode simple map: %w", err)
}
return base64.RawStdEncoding.EncodeToString(buf), nil
}
@@ -191,11 +190,11 @@ func (c Simple) Decode(in string) error {
}
decodedM, err := base64.RawStdEncoding.DecodeString(in)
if err != nil {
- return errors.Wrap(err, "decode simple map")
+ return fmt.Errorf("decode simple map: %w", err)
}
err = json.Unmarshal(decodedM, &c)
if err != nil {
- return errors.Wrap(err, "parse simple map")
+ return fmt.Errorf("parse simple map: %w", err)
}
return nil
}
diff --git a/fs/config/configstruct/configstruct.go b/fs/config/configstruct/configstruct.go
index 181f25b5e..637e5e4ef 100644
--- a/fs/config/configstruct/configstruct.go
+++ b/fs/config/configstruct/configstruct.go
@@ -2,12 +2,12 @@
package configstruct
import (
+ "errors"
"fmt"
"reflect"
"regexp"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
)
@@ -35,7 +35,7 @@ func StringToInterface(def interface{}, in string) (newValue interface{}, err er
o := reflect.New(typ)
n, err := fmt.Sscanln(in, o.Interface())
if err != nil {
- return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def)
+ return newValue, fmt.Errorf("parsing %q as %T failed: %w", in, def, err)
}
if n != 1 {
return newValue, errors.New("no items parsed")
@@ -115,7 +115,7 @@ func Set(config configmap.Getter, opt interface{}) (err error) {
// it isn't valid for all types. This makes
// empty string be the equivalent of unset.
if configValue != "" {
- return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value)
+ return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, configValue, defaultItem.Value, err)
}
} else {
newValue = newNewValue
diff --git a/fs/config/crypt.go b/fs/config/crypt.go
index b6f03fa2c..002e6c49f 100644
--- a/fs/config/crypt.go
+++ b/fs/config/crypt.go
@@ -7,6 +7,7 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/base64"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -14,7 +15,6 @@ import (
"os/exec"
"strings"
- "github.com/pkg/errors"
"golang.org/x/crypto/nacl/secretbox"
"github.com/rclone/rclone/fs"
@@ -95,12 +95,12 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
if ers := strings.TrimSpace(stderr.String()); ers != "" {
fs.Errorf(nil, "--password-command stderr: %s", ers)
}
- return nil, errors.Wrap(err, "password command failed")
+ return nil, fmt.Errorf("password command failed: %w", err)
}
if pass := strings.Trim(stdout.String(), "\r\n"); pass != "" {
err := SetConfigPassword(pass)
if err != nil {
- return nil, errors.Wrap(err, "incorrect password")
+ return nil, fmt.Errorf("incorrect password: %w", err)
}
} else {
return nil, errors.New("password-command returned empty string")
@@ -130,7 +130,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := ioutil.ReadAll(dec)
if err != nil {
- return nil, errors.Wrap(err, "failed to load base64 encoded data")
+ return nil, fmt.Errorf("failed to load base64 encoded data: %w", err)
}
if len(box) < 24+secretbox.Overhead {
return nil, errors.New("Configuration data too short")
@@ -144,13 +144,13 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
if err != nil {
errRemove := os.Remove(envKeyFile)
if errRemove != nil {
- return nil, errors.Wrap(err, "unable to read obscured config key and unable to delete the temp file")
+ return nil, fmt.Errorf("unable to read obscured config key and unable to delete the temp file: %w", err)
}
- return nil, errors.Wrap(err, "unable to read obscured config key")
+ return nil, fmt.Errorf("unable to read obscured config key: %w", err)
}
errRemove := os.Remove(envKeyFile)
if errRemove != nil {
- return nil, errors.Wrap(errRemove, "unable to delete temp file with configKey")
+ return nil, fmt.Errorf("unable to delete temp file with configKey: %w", errRemove)
}
configKey = []byte(obscure.MustReveal(string(obscuredKey)))
fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey")
@@ -201,12 +201,12 @@ func Encrypt(src io.Reader, dst io.Writer) error {
var nonce [24]byte
n, _ := rand.Read(nonce[:])
if n != 24 {
- return errors.Errorf("nonce short read: %d", n)
+ return fmt.Errorf("nonce short read: %d", n)
}
enc := base64.NewEncoder(base64.StdEncoding, dst)
_, err := enc.Write(nonce[:])
if err != nil {
- return errors.Errorf("Failed to write config file: %v", err)
+ return fmt.Errorf("Failed to write config file: %v", err)
}
var key [32]byte
@@ -219,7 +219,7 @@ func Encrypt(src io.Reader, dst io.Writer) error {
b := secretbox.Seal(nil, data, &nonce, &key)
_, err = enc.Write(b)
if err != nil {
- return errors.Errorf("Failed to write config file: %v", err)
+ return fmt.Errorf("Failed to write config file: %v", err)
}
return enc.Close()
}
@@ -258,32 +258,32 @@ func SetConfigPassword(password string) error {
if PassConfigKeyForDaemonization {
tempFile, err := ioutil.TempFile("", "rclone")
if err != nil {
- return errors.Wrap(err, "cannot create temp file to store configKey")
+ return fmt.Errorf("cannot create temp file to store configKey: %w", err)
}
_, err = tempFile.WriteString(obscure.MustObscure(string(configKey)))
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
- return errors.Wrap(err, "error writing configKey to temp file and also error deleting it")
+ return fmt.Errorf("error writing configKey to temp file and also error deleting it: %w", err)
}
- return errors.Wrap(err, "error writing configKey to temp file")
+ return fmt.Errorf("error writing configKey to temp file: %w", err)
}
err = tempFile.Close()
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
- return errors.Wrap(err, "error closing temp file with configKey and also error deleting it")
+ return fmt.Errorf("error closing temp file with configKey and also error deleting it: %w", err)
}
- return errors.Wrap(err, "error closing temp file with configKey")
+ return fmt.Errorf("error closing temp file with configKey: %w", err)
}
fs.Debugf(nil, "saving configKey to temp file")
err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name())
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
- return errors.Wrap(err, "unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file")
+ return fmt.Errorf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %w", err)
}
- return errors.Wrap(err, "unable to set environment variable _RCLONE_CONFIG_KEY_FILE")
+ return fmt.Errorf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %w", err)
}
}
return nil
diff --git a/fs/config/obscure/obscure.go b/fs/config/obscure/obscure.go
index 2f2261f3d..17aae165b 100644
--- a/fs/config/obscure/obscure.go
+++ b/fs/config/obscure/obscure.go
@@ -6,10 +6,10 @@ import (
"crypto/cipher"
"crypto/rand"
"encoding/base64"
+ "errors"
+ "fmt"
"io"
"log"
-
- "github.com/pkg/errors"
)
// crypt internals
@@ -50,10 +50,10 @@ func Obscure(x string) (string, error) {
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
iv := ciphertext[:aes.BlockSize]
if _, err := io.ReadFull(cryptRand, iv); err != nil {
- return "", errors.Wrap(err, "failed to read iv")
+ return "", fmt.Errorf("failed to read iv: %w", err)
}
if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
- return "", errors.Wrap(err, "encrypt failed")
+ return "", fmt.Errorf("encrypt failed: %w", err)
}
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
}
@@ -71,7 +71,7 @@ func MustObscure(x string) string {
func Reveal(x string) (string, error) {
ciphertext, err := base64.RawURLEncoding.DecodeString(x)
if err != nil {
- return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
+ return "", fmt.Errorf("base64 decode failed when revealing password - is it obscured?: %w", err)
}
if len(ciphertext) < aes.BlockSize {
return "", errors.New("input too short when revealing password - is it obscured?")
@@ -79,7 +79,7 @@ func Reveal(x string) (string, error) {
buf := ciphertext[aes.BlockSize:]
iv := ciphertext[:aes.BlockSize]
if err := crypt(buf, buf, iv); err != nil {
- return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
+ return "", fmt.Errorf("decrypt failed when revealing password - is it obscured?: %w", err)
}
return string(buf), nil
}
diff --git a/fs/config/rc.go b/fs/config/rc.go
index b5d4d0b91..fbbc7bdc9 100644
--- a/fs/config/rc.go
+++ b/fs/config/rc.go
@@ -2,8 +2,8 @@ package config
import (
"context"
+ "errors"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
diff --git a/fs/config/ui.go b/fs/config/ui.go
index 099ede7e5..5e2ded4af 100644
--- a/fs/config/ui.go
+++ b/fs/config/ui.go
@@ -5,6 +5,7 @@ package config
import (
"bufio"
"context"
+ "errors"
"fmt"
"log"
"os"
@@ -13,7 +14,6 @@ import (
"strings"
"unicode/utf8"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -281,7 +281,7 @@ func backendConfig(ctx context.Context, name string, m configmap.Mapper, ri *fs.
if value != "" {
err := out.Option.Set(value)
if err != nil {
- return errors.Wrap(err, "failed to set option")
+ return fmt.Errorf("failed to set option: %w", err)
}
}
in.Result = out.Option.String()
diff --git a/fs/countsuffix.go b/fs/countsuffix.go
index 93870a208..cea3b6329 100644
--- a/fs/countsuffix.go
+++ b/fs/countsuffix.go
@@ -2,13 +2,12 @@ package fs
// CountSuffix is parsed by flag with k/M/G decimal suffixes
import (
+ "errors"
"fmt"
"math"
"sort"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
// CountSuffix is an int64 with a friendly way of printing setting
@@ -138,7 +137,7 @@ func (x *CountSuffix) Set(s string) error {
}
default:
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
- return errors.Errorf("bad suffix %q", suffix)
+ return fmt.Errorf("bad suffix %q", suffix)
}
}
s = s[:len(s)-suffixLen]
@@ -147,7 +146,7 @@ func (x *CountSuffix) Set(s string) error {
return err
}
if value < 0 {
- return errors.Errorf("size can't be negative %q", s)
+ return fmt.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = CountSuffix(value)
diff --git a/fs/cutoffmode.go b/fs/cutoffmode.go
index 359143ee7..a58c5c452 100644
--- a/fs/cutoffmode.go
+++ b/fs/cutoffmode.go
@@ -3,8 +3,6 @@ package fs
import (
"fmt"
"strings"
-
- "github.com/pkg/errors"
)
// CutoffMode describes the possible delete modes in the config
@@ -40,7 +38,7 @@ func (m *CutoffMode) Set(s string) error {
return nil
}
}
- return errors.Errorf("Unknown cutoff mode %q", s)
+ return fmt.Errorf("Unknown cutoff mode %q", s)
}
// Type of the value
@@ -52,7 +50,7 @@ func (m *CutoffMode) Type() string {
func (m *CutoffMode) UnmarshalJSON(in []byte) error {
return UnmarshalJSONFlag(in, m, func(i int64) error {
if i < 0 || i >= int64(len(cutoffModeToString)) {
- return errors.Errorf("Out of range cutoff mode %d", i)
+ return fmt.Errorf("Out of range cutoff mode %d", i)
}
*m = (CutoffMode)(i)
return nil
diff --git a/fs/dirtree/dirtree.go b/fs/dirtree/dirtree.go
index 64f5b2512..cb78ab9df 100644
--- a/fs/dirtree/dirtree.go
+++ b/fs/dirtree/dirtree.go
@@ -10,7 +10,6 @@ import (
"time"
"github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/lib/errors"
)
// DirTree is a map of directories to entries
@@ -153,7 +152,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
case fs.Object:
// do nothing
default:
- return errors.Errorf("unknown object type %T", entry)
+ return fmt.Errorf("unknown object type %T", entry)
}
}
@@ -179,7 +178,7 @@ func (dt DirTree) Prune(dirNames map[string]bool) error {
case fs.Object:
// do nothing
default:
- return errors.Errorf("unknown object type %T", entry)
+ return fmt.Errorf("unknown object type %T", entry)
}
}
diff --git a/fs/dump.go b/fs/dump.go
index ec10b9259..f0c72bcab 100644
--- a/fs/dump.go
+++ b/fs/dump.go
@@ -3,8 +3,6 @@ package fs
import (
"fmt"
"strings"
-
- "github.com/pkg/errors"
)
// DumpFlags describes the Dump options in force
@@ -80,7 +78,7 @@ func (f *DumpFlags) Set(s string) error {
}
}
if !found {
- return errors.Errorf("Unknown dump flag %q", part)
+ return fmt.Errorf("Unknown dump flag %q", part)
}
}
*f = flags
diff --git a/fs/filter/filter.go b/fs/filter/filter.go
index ef2a21c3a..72a25462f 100644
--- a/fs/filter/filter.go
+++ b/fs/filter/filter.go
@@ -4,6 +4,7 @@ package filter
import (
"bufio"
"context"
+ "errors"
"fmt"
"log"
"os"
@@ -12,7 +13,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"golang.org/x/sync/errgroup"
)
@@ -318,7 +318,7 @@ func (f *Filter) AddRule(rule string) error {
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
- return errors.Errorf("malformed rule %q", rule)
+ return fmt.Errorf("malformed rule %q", rule)
}
// initAddFile creates f.files and f.dirs
diff --git a/fs/filter/glob.go b/fs/filter/glob.go
index f9198a886..fdaf96bdb 100644
--- a/fs/filter/glob.go
+++ b/fs/filter/glob.go
@@ -4,10 +4,9 @@ package filter
import (
"bytes"
+ "fmt"
"regexp"
"strings"
-
- "github.com/pkg/errors"
)
// GlobToRegexp converts an rsync style glob to a regexp
@@ -33,7 +32,7 @@ func GlobToRegexp(glob string, ignoreCase bool) (*regexp.Regexp, error) {
case 2:
_, _ = re.WriteString(`.*`)
default:
- return errors.Errorf("too many stars in %q", glob)
+ return fmt.Errorf("too many stars in %q", glob)
}
}
consecutiveStars = 0
@@ -76,16 +75,16 @@ func GlobToRegexp(glob string, ignoreCase bool) (*regexp.Regexp, error) {
_, _ = re.WriteRune(c)
inBrackets++
case ']':
- return nil, errors.Errorf("mismatched ']' in glob %q", glob)
+ return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
case '{':
if inBraces {
- return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
+ return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
}
inBraces = true
_, _ = re.WriteRune('(')
case '}':
if !inBraces {
- return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
+ return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune(')')
inBraces = false
@@ -107,15 +106,15 @@ func GlobToRegexp(glob string, ignoreCase bool) (*regexp.Regexp, error) {
return nil, err
}
if inBrackets > 0 {
- return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
+ return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
}
if inBraces {
- return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
+ return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune('$')
result, err := regexp.Compile(re.String())
if err != nil {
- return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
+ return nil, fmt.Errorf("bad glob pattern %q (regexp %q): %w", glob, re.String(), err)
}
return result, nil
}
diff --git a/fs/fs.go b/fs/fs.go
index fb4885d34..65d9c17d4 100644
--- a/fs/fs.go
+++ b/fs/fs.go
@@ -3,11 +3,10 @@ package fs
import (
"context"
+ "errors"
"io"
"math"
"time"
-
- "github.com/pkg/errors"
)
// Constants
diff --git a/fs/fs_test.go b/fs/fs_test.go
index 4dad61f01..4e316b9b5 100644
--- a/fs/fs_test.go
+++ b/fs/fs_test.go
@@ -3,6 +3,7 @@ package fs
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"os"
"strings"
@@ -13,7 +14,6 @@ import (
"github.com/spf13/pflag"
"github.com/stretchr/testify/require"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
diff --git a/fs/fserrors/enospc_error.go b/fs/fserrors/enospc_error.go
index 930c5a174..410b513b1 100644
--- a/fs/fserrors/enospc_error.go
+++ b/fs/fserrors/enospc_error.go
@@ -6,13 +6,13 @@ package fserrors
import (
"syscall"
- "github.com/rclone/rclone/lib/errors"
+ liberrors "github.com/rclone/rclone/lib/errors"
)
// IsErrNoSpace checks a possibly wrapped error to
// see if it contains a ENOSPC error
func IsErrNoSpace(cause error) (isNoSpc bool) {
- errors.Walk(cause, func(c error) bool {
+ liberrors.Walk(cause, func(c error) bool {
if c == syscall.ENOSPC {
isNoSpc = true
return true
diff --git a/fs/fserrors/error.go b/fs/fserrors/error.go
index b3fbfcce6..4cdf7ac9d 100644
--- a/fs/fserrors/error.go
+++ b/fs/fserrors/error.go
@@ -3,15 +3,21 @@ package fserrors
import (
"context"
+ "errors"
"fmt"
"io"
"net/http"
"strings"
"time"
- "github.com/rclone/rclone/lib/errors"
+ liberrors "github.com/rclone/rclone/lib/errors"
)
+// Must be satisfied for errors.Is/errors.As/errors.Unwrap
+type unwrapper interface {
+ Unwrap() error
+}
+
// Retrier is an optional interface for error as to whether the
// operation should be retried at a high level.
//
@@ -34,7 +40,7 @@ func (r retryError) Retry() bool {
return true
}
-// Check interface
+// Check interfaces
var _ Retrier = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
@@ -53,8 +59,9 @@ func (err wrappedRetryError) Retry() bool {
return true
}
-// Check interface
+// Check interfaces
var _ Retrier = wrappedRetryError{error(nil)}
+var _ unwrapper = wrappedRetryError{}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
@@ -64,14 +71,14 @@ func RetryError(err error) error {
return wrappedRetryError{err}
}
-func (err wrappedRetryError) Cause() error {
+func (err wrappedRetryError) Unwrap() error {
return err.error
}
// IsRetryError returns true if err conforms to the Retry interface
// and calling the Retry method returns true.
func IsRetryError(err error) (isRetry bool) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(Retrier); ok {
isRetry = r.Retry()
return true
@@ -101,8 +108,9 @@ func (err wrappedFatalError) Fatal() bool {
return true
}
-// Check interface
+// Check interfaces
var _ Fataler = wrappedFatalError{error(nil)}
+var _ unwrapper = wrappedFatalError{}
// FatalError makes an error which indicates it is a fatal error and
// the sync should stop.
@@ -113,14 +121,14 @@ func FatalError(err error) error {
return wrappedFatalError{err}
}
-func (err wrappedFatalError) Cause() error {
+func (err wrappedFatalError) Unwrap() error {
return err.error
}
// IsFatalError returns true if err conforms to the Fatal interface
// and calling the Fatal method returns true.
func IsFatalError(err error) (isFatal bool) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(Fataler); ok {
isFatal = r.Fatal()
return true
@@ -153,8 +161,9 @@ func (err wrappedNoRetryError) NoRetry() bool {
return true
}
-// Check interface
+// Check interfaces
var _ NoRetrier = wrappedNoRetryError{error(nil)}
+var _ unwrapper = wrappedNoRetryError{}
// NoRetryError makes an error which indicates the sync shouldn't be
// retried.
@@ -162,14 +171,14 @@ func NoRetryError(err error) error {
return wrappedNoRetryError{err}
}
-func (err wrappedNoRetryError) Cause() error {
+func (err wrappedNoRetryError) Unwrap() error {
return err.error
}
// IsNoRetryError returns true if err conforms to the NoRetry
// interface and calling the NoRetry method returns true.
func IsNoRetryError(err error) (isNoRetry bool) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(NoRetrier); ok {
isNoRetry = r.NoRetry()
return true
@@ -199,8 +208,9 @@ func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
-// Check interface
+// Check interfaces
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
+var _ unwrapper = wrappedNoLowLevelRetryError{}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
@@ -208,15 +218,15 @@ func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
-// Cause returns the underlying error
-func (err wrappedNoLowLevelRetryError) Cause() error {
+// Unwrap returns the underlying error
+func (err wrappedNoLowLevelRetryError) Unwrap() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
@@ -257,13 +267,13 @@ func (e ErrorRetryAfter) RetryAfter() time.Time {
return time.Time(e)
}
-// Check interface
+// Check interfaces
var _ RetryAfter = ErrorRetryAfter{}
// RetryAfterErrorTime returns the time that the RetryAfter error
// indicates or a Zero time.Time
func RetryAfterErrorTime(err error) (retryAfter time.Time) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(RetryAfter); ok {
retryAfter = r.RetryAfter()
return true
@@ -303,7 +313,7 @@ func (err *wrappedCountableError) IsCounted() bool {
return err.isCounted
}
-func (err *wrappedCountableError) Cause() error {
+func (err wrappedCountableError) Unwrap() error {
return err.error
}
@@ -326,6 +336,7 @@ func Count(err error) {
// Check interface
var _ CountableError = &wrappedCountableError{error: error(nil)}
+var _ unwrapper = wrappedCountableError{}
// FsError makes an error which can keep a record that it is already counted
// or not
@@ -340,7 +351,7 @@ func FsError(err error) error {
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
func Cause(cause error) (retriable bool, err error) {
- errors.Walk(cause, func(c error) bool {
+ liberrors.Walk(cause, func(c error) bool {
// Check for net error Timeout()
if x, ok := c.(interface {
Timeout() bool
@@ -453,13 +464,3 @@ func ContextError(ctx context.Context, perr *error) bool {
}
return false
}
-
-type causer interface {
- Cause() error
-}
-
-var (
- _ causer = wrappedRetryError{}
- _ causer = wrappedFatalError{}
- _ causer = wrappedNoRetryError{}
-)
diff --git a/fs/fserrors/error_test.go b/fs/fserrors/error_test.go
index da8391b0e..a0cf4e2e6 100644
--- a/fs/fserrors/error_test.go
+++ b/fs/fserrors/error_test.go
@@ -2,6 +2,7 @@ package fserrors
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -11,10 +12,33 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
+// withMessage wraps an error with a message
+//
+// This is for backwards compatibility with the now removed github.com/pkg/errors
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
var errUseOfClosedNetworkConnection = errors.New("use of closed network connection")
// make a plausible network error with the underlying errno
@@ -94,8 +118,8 @@ func TestCause(t *testing.T) {
}{
{nil, false, nil},
{errPotato, false, errPotato},
- {errors.Wrap(errPotato, "potato"), false, errPotato},
- {errors.Wrap(errors.Wrap(errPotato, "potato2"), "potato"), false, errPotato},
+ {fmt.Errorf("potato: %w", errPotato), false, errPotato},
+ {fmt.Errorf("potato2: %w", wrap(errPotato, "potato")), false, errPotato},
{errUseOfClosedNetworkConnection, false, errUseOfClosedNetworkConnection},
{makeNetErr(syscall.EAGAIN), true, syscall.EAGAIN},
{makeNetErr(syscall.Errno(123123123)), false, syscall.Errno(123123123)},
@@ -124,7 +148,7 @@ func TestShouldRetry(t *testing.T) {
}{
{nil, false},
{errors.New("potato"), false},
- {errors.Wrap(errUseOfClosedNetworkConnection, "connection"), true},
+ {fmt.Errorf("connection: %w", errUseOfClosedNetworkConnection), true},
{io.EOF, true},
{io.ErrUnexpectedEOF, true},
{makeNetErr(syscall.EAGAIN), true},
@@ -133,7 +157,7 @@ func TestShouldRetry(t *testing.T) {
{&url.Error{Op: "post", URL: "/", Err: errUseOfClosedNetworkConnection}, true},
{&url.Error{Op: "post", URL: "/", Err: fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", fmt.Errorf("http: ContentLength=%d with Body length %d", 100663336, 99590598))}, true},
{
- errors.Wrap(&url.Error{
+ wrap(&url.Error{
Op: "post",
URL: "http://localhost/",
Err: makeNetErr(syscall.EPIPE),
@@ -141,7 +165,7 @@ func TestShouldRetry(t *testing.T) {
true,
},
{
- errors.Wrap(&url.Error{
+ wrap(&url.Error{
Op: "post",
URL: "http://localhost/",
Err: makeNetErr(syscall.Errno(123123123)),
@@ -166,7 +190,7 @@ func TestRetryAfter(t *testing.T) {
assert.Contains(t, e.Error(), "try again after")
t0 := time.Now()
- err := errors.Wrap(ErrorRetryAfter(t0), "potato")
+ err := fmt.Errorf("potato: %w", ErrorRetryAfter(t0))
assert.Equal(t, t0, RetryAfterErrorTime(err))
assert.True(t, IsRetryAfterError(err))
assert.Contains(t, e.Error(), "try again after")
diff --git a/fs/hash/hash.go b/fs/hash/hash.go
index 9d2f368de..a4eaf517d 100644
--- a/fs/hash/hash.go
+++ b/fs/hash/hash.go
@@ -5,6 +5,7 @@ import (
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
+ "errors"
"fmt"
"hash"
"hash/crc32"
@@ -12,7 +13,6 @@ import (
"strings"
"github.com/jzelinskie/whirlpool"
- "github.com/pkg/errors"
)
// Type indicates a standard hashing algorithm
@@ -149,7 +149,7 @@ func (h *Type) Set(s string) error {
*h = hash.hashType
return nil
}
- return errors.Errorf("Unknown hash type %q", s)
+ return fmt.Errorf("Unknown hash type %q", s)
}
// Type of the value
@@ -162,7 +162,7 @@ func (h Type) Type() string {
// and this function must support all types.
func fromTypes(set Set) (map[Type]hash.Hash, error) {
if !set.SubsetOf(Supported()) {
- return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
+ return nil, fmt.Errorf("requested set %08x contains unknown hash types", int(set))
}
hashers := map[Type]hash.Hash{}
diff --git a/fs/list/list.go b/fs/list/list.go
index dfa8b688d..09d47ac8f 100644
--- a/fs/list/list.go
+++ b/fs/list/list.go
@@ -3,10 +3,10 @@ package list
import (
"context"
+ "fmt"
"sort"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
)
@@ -67,7 +67,7 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
}
}
default:
- return nil, errors.Errorf("unknown object type %T", entry)
+ return nil, fmt.Errorf("unknown object type %T", entry)
}
// check remote name belongs in this directory
remote := entry.Remote()
diff --git a/fs/log.go b/fs/log.go
index a3bc71a4a..423352344 100644
--- a/fs/log.go
+++ b/fs/log.go
@@ -6,7 +6,6 @@ import (
"log"
"os"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -62,7 +61,7 @@ func (l *LogLevel) Set(s string) error {
return nil
}
}
- return errors.Errorf("Unknown log level %q", s)
+ return fmt.Errorf("Unknown log level %q", s)
}
// Type of the value
@@ -74,7 +73,7 @@ func (l *LogLevel) Type() string {
func (l *LogLevel) UnmarshalJSON(in []byte) error {
return UnmarshalJSONFlag(in, l, func(i int64) error {
if i < 0 || i >= int64(LogLevel(len(logLevelToString))) {
- return errors.Errorf("Unknown log level %d", i)
+ return fmt.Errorf("Unknown log level %d", i)
}
*l = (LogLevel)(i)
return nil
diff --git a/fs/march/march.go b/fs/march/march.go
index d2193eb14..db045156d 100644
--- a/fs/march/march.go
+++ b/fs/march/march.go
@@ -3,13 +3,12 @@ package march
import (
"context"
+ "fmt"
"path"
"sort"
"strings"
"sync"
- "github.com/pkg/errors"
-
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/filter"
@@ -216,7 +215,7 @@ func (m *March) Run(ctx context.Context) error {
wg.Wait()
if errCount > 1 {
- return errors.Wrapf(jobError, "march failed with %d error(s): first error", errCount)
+ return fmt.Errorf("march failed with %d error(s): first error: %w", errCount, jobError)
}
return jobError
}
diff --git a/fs/mount_helper.go b/fs/mount_helper.go
index 6a1a0953b..884b1f67c 100644
--- a/fs/mount_helper.go
+++ b/fs/mount_helper.go
@@ -1,14 +1,13 @@
package fs
import (
+ "errors"
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
-
- "github.com/pkg/errors"
)
func init() {
@@ -83,7 +82,7 @@ func convertMountHelperArgs(origArgs []string) ([]string, error) {
args = append(args, "--help")
default:
if strings.HasPrefix(arg, "-") {
- return nil, errors.Errorf("flag %q is not supported in mount mode", arg)
+ return nil, fmt.Errorf("flag %q is not supported in mount mode", arg)
}
args = append(args, arg)
}
@@ -149,7 +148,7 @@ func convertMountHelperArgs(origArgs []string) ([]string, error) {
}
}
if parseOpts {
- return nil, errors.Errorf("dangling -o without argument")
+ return nil, fmt.Errorf("dangling -o without argument")
}
if vCount > 0 && !gotVerbose {
diff --git a/fs/open_options.go b/fs/open_options.go
index e39a23fb7..8d997118a 100644
--- a/fs/open_options.go
+++ b/fs/open_options.go
@@ -3,12 +3,12 @@
package fs
import (
+ "errors"
"fmt"
"net/http"
"strconv"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/hash"
)
diff --git a/fs/operations/check.go b/fs/operations/check.go
index b7c36eec1..12c63e701 100644
--- a/fs/operations/check.go
+++ b/fs/operations/check.go
@@ -4,6 +4,8 @@ import (
"bufio"
"bytes"
"context"
+ "errors"
+ "fmt"
"io"
"os"
"regexp"
@@ -11,7 +13,6 @@ import (
"sync"
"sync/atomic"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
@@ -79,7 +80,7 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
if c.opt.OneWay {
return false
}
- err := errors.Errorf("File not in %v", c.opt.Fsrc)
+ err := fmt.Errorf("File not in %v", c.opt.Fsrc)
fs.Errorf(dst, "%v", err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
@@ -101,7 +102,7 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
switch src.(type) {
case fs.Object:
- err := errors.Errorf("File not in %v", c.opt.Fdst)
+ err := fmt.Errorf("File not in %v", c.opt.Fdst)
fs.Errorf(src, "%v", err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
@@ -124,7 +125,7 @@ func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (di
tr.Done(ctx, err)
}()
if sizeDiffers(ctx, src, dst) {
- err = errors.Errorf("Sizes differ")
+ err = fmt.Errorf("Sizes differ")
fs.Errorf(src, "%v", err)
return true, false, nil
}
@@ -173,7 +174,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
}
}()
} else {
- err := errors.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst)
+ err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst)
fs.Errorf(src, "%v", err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
@@ -186,7 +187,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
if ok {
return true
}
- err := errors.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc)
+ err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc)
fs.Errorf(dst, "%v", err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
@@ -258,7 +259,7 @@ func (c *checkMarch) reportResults(ctx context.Context, err error) error {
}
if c.differences > 0 {
// Return an already counted error so we don't double count this error too
- err = fserrors.FsError(errors.Errorf("%d differences found", c.differences))
+ err = fserrors.FsError(fmt.Errorf("%d differences found", c.differences))
fserrors.Count(err)
return err
}
@@ -277,7 +278,7 @@ func Check(ctx context.Context, opt *CheckOpt) error {
return false, true, nil
}
if !same {
- err = errors.Errorf("%v differ", ht)
+ err = fmt.Errorf("%v differ", ht)
fs.Errorf(src, "%v", err)
return true, false, nil
}
@@ -334,7 +335,7 @@ func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ boo
func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
in1, err := dst.Open(ctx)
if err != nil {
- return true, errors.Wrapf(err, "failed to open %q", dst)
+ return true, fmt.Errorf("failed to open %q: %w", dst, err)
}
tr1 := accounting.Stats(ctx).NewTransfer(dst)
defer func() {
@@ -344,7 +345,7 @@ func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ boo
in2, err := src.Open(ctx)
if err != nil {
- return true, errors.Wrapf(err, "failed to open %q", src)
+ return true, fmt.Errorf("failed to open %q: %w", src, err)
}
tr2 := accounting.Stats(ctx).NewTransfer(dst)
defer func() {
@@ -364,7 +365,7 @@ func CheckDownload(ctx context.Context, opt *CheckOpt) error {
optCopy.Check = func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
differ, err = CheckIdenticalDownload(ctx, a, b)
if err != nil {
- return true, true, errors.Wrap(err, "failed to download")
+ return true, true, fmt.Errorf("failed to download: %w", err)
}
return differ, false, nil
}
@@ -386,19 +387,19 @@ func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType ha
opt = &options // override supplied argument
if !download && (hashType == hash.None || !opt.Fdst.Hashes().Contains(hashType)) {
- return errors.Errorf("%s: hash type is not supported by file system: %s", hashType, opt.Fdst)
+ return fmt.Errorf("%s: hash type is not supported by file system: %s", hashType, opt.Fdst)
}
if sumFile == "" {
- return errors.Errorf("not a sum file: %s", fsum)
+ return fmt.Errorf("not a sum file: %s", fsum)
}
sumObj, err := fsum.NewObject(ctx, sumFile)
if err != nil {
- return errors.Wrap(err, "cannot open sum file")
+ return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := ParseSumFile(ctx, sumObj)
if err != nil {
- return errors.Wrap(err, "failed to parse sum file")
+ return fmt.Errorf("failed to parse sum file: %w", err)
}
ci := fs.GetConfig(ctx)
@@ -421,7 +422,7 @@ func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType ha
continue
}
// filesystem missed the file, sum wasn't consumed
- err := errors.Errorf("File not in %v", opt.Fdst)
+ err := fmt.Errorf("File not in %v", opt.Fdst)
fs.Errorf(filename, "%v", err)
_ = fs.CountError(err)
if lastErr == nil {
diff --git a/fs/operations/check_test.go b/fs/operations/check_test.go
index 4615bbd13..6f914abc0 100644
--- a/fs/operations/check_test.go
+++ b/fs/operations/check_test.go
@@ -3,6 +3,7 @@ package operations_test
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"log"
@@ -11,7 +12,6 @@ import (
"strings"
"testing"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
diff --git a/fs/operations/dedupe.go b/fs/operations/dedupe.go
index 11ad82dd8..1da760a7a 100644
--- a/fs/operations/dedupe.go
+++ b/fs/operations/dedupe.go
@@ -10,7 +10,6 @@ import (
"sort"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash"
@@ -237,7 +236,7 @@ func (x *DeduplicateMode) Set(s string) error {
case "list":
*x = DeduplicateList
default:
- return errors.Errorf("Unknown mode for dedupe %q.", s)
+ return fmt.Errorf("unknown mode for dedupe %q", s)
}
return nil
}
@@ -319,7 +318,7 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*d
return nil
})
if err != nil {
- return nil, errors.Wrap(err, "find duplicate dirs")
+ return nil, fmt.Errorf("find duplicate dirs: %w", err)
}
// Make sure parents are before children
@@ -341,11 +340,11 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*d
func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]*dedupeDir) error {
mergeDirs := f.Features().MergeDirs
if mergeDirs == nil {
- return errors.Errorf("%v: can't merge directories", f)
+ return fmt.Errorf("%v: can't merge directories", f)
}
dirCacheFlush := f.Features().DirCacheFlush
if dirCacheFlush == nil {
- return errors.Errorf("%v: can't flush dir cache", f)
+ return fmt.Errorf("%v: can't flush dir cache", f)
}
for _, dedupeDirs := range duplicateDirs {
if SkipDestructive(ctx, dedupeDirs[0].dir, "merge duplicate directories") {
@@ -400,7 +399,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool
what := "names"
if byHash {
if ht == hash.None {
- return errors.Errorf("%v has no hashes", f)
+ return fmt.Errorf("%v has no hashes", f)
}
what = ht.String() + " hashes"
}
diff --git a/fs/operations/lsjson.go b/fs/operations/lsjson.go
index c4ab976c9..5af5a4295 100644
--- a/fs/operations/lsjson.go
+++ b/fs/operations/lsjson.go
@@ -2,11 +2,12 @@ package operations
import (
"context"
+ "errors"
+ "fmt"
"path"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
@@ -118,14 +119,14 @@ func newListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOp
if opt.ShowEncrypted {
fsInfo, _, _, config, err := fs.ConfigFs(fsrc.Name() + ":" + fsrc.Root())
if err != nil {
- return nil, errors.Wrap(err, "ListJSON failed to load config for crypt remote")
+ return nil, fmt.Errorf("ListJSON failed to load config for crypt remote: %w", err)
}
if fsInfo.Name != "crypt" {
return nil, errors.New("The remote needs to be of type \"crypt\"")
}
lj.cipher, err = crypt.NewCipher(config)
if err != nil {
- return nil, errors.Wrap(err, "ListJSON failed to make new crypt remote")
+ return nil, fmt.Errorf("ListJSON failed to make new crypt remote: %w", err)
}
}
features := fsrc.Features()
@@ -237,19 +238,19 @@ func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt,
for _, entry := range entries {
item, err := lj.entry(ctx, entry)
if err != nil {
- return errors.Wrap(err, "creating entry failed in ListJSON")
+ return fmt.Errorf("creating entry failed in ListJSON: %w", err)
}
if item != nil {
err = callback(item)
if err != nil {
- return errors.Wrap(err, "callback failed in ListJSON")
+ return fmt.Errorf("callback failed in ListJSON: %w", err)
}
}
}
return nil
})
if err != nil {
- return errors.Wrap(err, "error in ListJSON")
+ return fmt.Errorf("error in ListJSON: %w", err)
}
return nil
}
diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go
index db246b82a..b8ace21d5 100644
--- a/fs/operations/multithread.go
+++ b/fs/operations/multithread.go
@@ -2,9 +2,10 @@ package operations
import (
"context"
+ "errors"
+ "fmt"
"io"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"golang.org/x/sync/errgroup"
@@ -76,7 +77,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
rc, err := NewReOpen(ctx, mc.src, ci.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
if err != nil {
- return errors.Wrap(err, "multipart copy: failed to open source")
+ return fmt.Errorf("multipart copy: failed to open source: %w", err)
}
defer fs.CheckClose(rc, &err)
@@ -92,29 +93,29 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
if nr > 0 {
err = mc.acc.AccountRead(nr)
if err != nil {
- return errors.Wrap(err, "multipart copy: accounting failed")
+ return fmt.Errorf("multipart copy: accounting failed: %w", err)
}
nw, ew := mc.wc.WriteAt(buf[0:nr], offset)
if nw > 0 {
offset += int64(nw)
}
if ew != nil {
- return errors.Wrap(ew, "multipart copy: write failed")
+ return fmt.Errorf("multipart copy: write failed: %w", ew)
}
if nr != nw {
- return errors.Wrap(io.ErrShortWrite, "multipart copy")
+ return fmt.Errorf("multipart copy: %w", io.ErrShortWrite)
}
}
if er != nil {
if er != io.EOF {
- return errors.Wrap(er, "multipart copy: read failed")
+ return fmt.Errorf("multipart copy: read failed: %w", er)
}
break
}
}
if offset != end {
- return errors.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
+ return fmt.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
}
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
@@ -166,7 +167,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object,
// create write file handle
mc.wc, err = openWriterAt(gCtx, remote, mc.size)
if err != nil {
- return nil, errors.Wrap(err, "multipart copy: failed to open destination")
+ return nil, fmt.Errorf("multipart copy: failed to open destination: %w", err)
}
fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize))
@@ -182,19 +183,19 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object,
return nil, err
}
if closeErr != nil {
- return nil, errors.Wrap(closeErr, "multi-thread copy: failed to close object after copy")
+ return nil, fmt.Errorf("multi-thread copy: failed to close object after copy: %w", closeErr)
}
obj, err := f.NewObject(ctx, remote)
if err != nil {
- return nil, errors.Wrap(err, "multi-thread copy: failed to find object after copy")
+ return nil, fmt.Errorf("multi-thread copy: failed to find object after copy: %w", err)
}
err = obj.SetModTime(ctx, src.ModTime(ctx))
switch err {
case nil, fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
default:
- return nil, errors.Wrap(err, "multi-thread copy: failed to set modification time")
+ return nil, fmt.Errorf("multi-thread copy: failed to set modification time: %w", err)
}
fs.Debugf(src, "Finished multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize))
diff --git a/fs/operations/operations.go b/fs/operations/operations.go
index 30a0306e7..54311b5f7 100644
--- a/fs/operations/operations.go
+++ b/fs/operations/operations.go
@@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/csv"
"encoding/hex"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -21,7 +22,6 @@ import (
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
@@ -441,7 +441,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
}
in0, err = NewReOpen(ctx, src, ci.LowLevelRetries, options...)
if err != nil {
- err = errors.Wrap(err, "failed to open source object")
+ err = fmt.Errorf("failed to open source object: %w", err)
} else {
if src.Size() == -1 {
// -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream.
@@ -512,7 +512,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
// Verify sizes are the same after transfer
if sizeDiffers(ctx, src, dst) {
- err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
+ err = fmt.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
fs.Errorf(dst, "%v", err)
err = fs.CountError(err)
removeFailedCopy(ctx, dst)
@@ -524,7 +524,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
// checkHashes has logged and counted errors
equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType)
if !equal {
- err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
+ err = fmt.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
fs.Errorf(dst, "%v", err)
err = fs.CountError(err)
removeFailedCopy(ctx, dst)
@@ -727,7 +727,7 @@ func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, b
fs.Debugf(nil, "Waiting for deletions to finish")
wg.Wait()
if errorCount > 0 {
- err := errors.Errorf("failed to delete %d files", errorCount)
+ err := fmt.Errorf("failed to delete %d files", errorCount)
if fatalErrorCount > 0 {
return fserrors.FatalError(err)
}
@@ -968,7 +968,7 @@ func hashSum(ctx context.Context, ht hash.Type, downloadFlag bool, o fs.Object)
}
in, err := NewReOpen(ctx, o, fs.GetConfig(ctx).LowLevelRetries, options...)
if err != nil {
- return "ERROR", errors.Wrapf(err, "Failed to open file %v", o)
+ return "ERROR", fmt.Errorf("Failed to open file %v: %w", o, err)
}
// Account and buffer the transfer
@@ -977,19 +977,19 @@ func hashSum(ctx context.Context, ht hash.Type, downloadFlag bool, o fs.Object)
// Setup hasher
hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
- return "UNSUPPORTED", errors.Wrap(err, "Hash unsupported")
+ return "UNSUPPORTED", fmt.Errorf("Hash unsupported: %w", err)
}
// Copy to hasher, downloading the file and passing directly to hash
_, err = io.Copy(hasher, in)
if err != nil {
- return "ERROR", errors.Wrap(err, "Failed to copy file to hasher")
+ return "ERROR", fmt.Errorf("Failed to copy file to hasher: %w", err)
}
// Get hash and encode as hex
byteSum, err := hasher.Sum(ht)
if err != nil {
- return "ERROR", errors.Wrap(err, "Hasher returned an error")
+ return "ERROR", fmt.Errorf("Hasher returned an error: %w", err)
}
sum = hex.EncodeToString(byteSum)
} else {
@@ -1000,10 +1000,10 @@ func hashSum(ctx context.Context, ht hash.Type, downloadFlag bool, o fs.Object)
sum, err = o.Hash(ctx, ht)
if err == hash.ErrUnsupported {
- return "", errors.Wrap(err, "Hash unsupported")
+ return "", fmt.Errorf("Hash unsupported: %w", err)
}
if err != nil {
- return "", errors.Wrapf(err, "Failed to get hash %v from backend: %v", ht, err)
+ return "", fmt.Errorf("Failed to get hash %v from backend: %w", ht, err)
}
}
@@ -1183,7 +1183,7 @@ func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
return nil
})
if err != nil && err != fs.ErrorDirNotFound {
- err = errors.Wrap(err, "failed to list")
+ err = fmt.Errorf("failed to list: %w", err)
err = fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
@@ -1195,7 +1195,7 @@ func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
func CleanUp(ctx context.Context, f fs.Fs) error {
doCleanUp := f.Features().CleanUp
if doCleanUp == nil {
- return errors.Errorf("%v doesn't support cleanup", f)
+ return fmt.Errorf("%v doesn't support cleanup", f)
}
if SkipDestructive(ctx, f, "clean up old files") {
return nil
@@ -1298,7 +1298,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
}
src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst)
if !Equal(ctx, src, dst) {
- err = errors.Errorf("corrupted on transfer")
+ err = errors.New("corrupted on transfer")
err = fs.CountError(err)
fs.Errorf(dst, "%v", err)
return err
@@ -1326,7 +1326,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
tmpLocalFs, err := fs.TemporaryLocalFs(ctx)
if err != nil {
- return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
+ return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
}
defer func() {
err := Purge(ctx, tmpLocalFs, "")
@@ -1361,7 +1361,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration, unlink bool) (string, error) {
doPublicLink := f.Features().PublicLink
if doPublicLink == nil {
- return "", errors.Errorf("%v doesn't support public links", f)
+ return "", fmt.Errorf("%v doesn't support public links", f)
}
return doPublicLink(ctx, remote, expire, unlink)
}
@@ -1410,7 +1410,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
return nil
})
if err != nil {
- return errors.Wrap(err, "failed to rmdirs")
+ return fmt.Errorf("failed to rmdirs: %w", err)
}
// Now delete the empty directories, starting from the longest path
var toDelete []string
@@ -1442,7 +1442,7 @@ func GetCompareDest(ctx context.Context) (CompareDest []fs.Fs, err error) {
ci := fs.GetConfig(ctx)
CompareDest, err = cache.GetArr(ctx, ci.CompareDest)
if err != nil {
- return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err))
+ return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err))
}
return CompareDest, nil
}
@@ -1481,7 +1481,7 @@ func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest []fs.Fs, err error)
ci := fs.GetConfig(ctx)
CopyDest, err = cache.GetArr(ctx, ci.CopyDest)
if err != nil {
- return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err))
+ return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err))
}
if !SameConfigArr(fdst, CopyDest) {
return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
@@ -1522,7 +1522,7 @@ func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, bac
if dst != nil && backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dst)
if err != nil {
- return false, errors.Wrap(err, "moving to --backup-dir failed")
+ return false, fmt.Errorf("moving to --backup-dir failed: %w", err)
}
// If successful zero out the dstObj as it is no longer there
dst = nil
@@ -1685,7 +1685,7 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameF
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
- return errors.Errorf("CopyURL failed: %s", resp.Status)
+ return fmt.Errorf("CopyURL failed: %s", resp.Status)
}
modTime, err := http.ParseTime(resp.Header.Get("Last-Modified"))
if err != nil {
@@ -1694,7 +1694,7 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameF
if dstFileNameFromURL {
dstFileName = path.Base(resp.Request.URL.Path)
if dstFileName == "." || dstFileName == "/" {
- return errors.Errorf("CopyURL failed: file name wasn't found in url")
+ return errors.New("CopyURL failed: file name wasn't found in url")
}
fs.Debugf(dstFileName, "File name found in url")
}
@@ -1731,7 +1731,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
if ci.BackupDir != "" {
backupDir, err = cache.Get(ctx, ci.BackupDir)
if err != nil {
- return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
+ return nil, fserrors.FatalError(fmt.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
}
if !SameConfig(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
@@ -1818,7 +1818,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
if err == nil {
return errors.New("found an already existing file with a randomly generated name. Try the operation again")
}
- return errors.Wrap(err, "error while attempting to move file to a temporary location")
+ return fmt.Errorf("error while attempting to move file to a temporary location: %w", err)
}
tr := accounting.Stats(ctx).NewTransfer(srcObj)
defer func() {
@@ -1826,7 +1826,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
}()
tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
if err != nil {
- return errors.Wrap(err, "error while moving file to temporary location")
+ return fmt.Errorf("error while moving file to temporary location: %w", err)
}
_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
return err
@@ -1837,7 +1837,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
if ci.BackupDir != "" || ci.Suffix != "" {
backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName)
if err != nil {
- return errors.Wrap(err, "creating Fs for --backup-dir failed")
+ return fmt.Errorf("creating Fs for --backup-dir failed: %w", err)
}
}
if len(ci.CompareDest) > 0 {
@@ -1860,7 +1860,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
if dstObj != nil && backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dstObj)
if err != nil {
- return errors.Wrap(err, "moving to --backup-dir failed")
+ return fmt.Errorf("moving to --backup-dir failed: %w", err)
}
// If successful zero out the dstObj as it is no longer there
dstObj = nil
@@ -1914,7 +1914,7 @@ func TouchDir(ctx context.Context, f fs.Fs, t time.Time, recursive bool) error {
fs.Debugf(f, "Touching %q", o.Remote())
err := o.SetModTime(ctx, t)
if err != nil {
- err = errors.Wrap(err, "failed to touch")
+ err = fmt.Errorf("failed to touch: %w", err)
err = fs.CountError(err)
fs.Errorf(o, "%v", err)
}
@@ -2093,7 +2093,7 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
// Load the directory tree into memory
tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1)
if err != nil {
- return errors.Wrap(err, "RenameDir tree walk")
+ return fmt.Errorf("RenameDir tree walk: %w", err)
}
// Get the directories in sorted order
@@ -2104,7 +2104,7 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
dstPath := dstRemote + dir[len(srcRemote):]
err := f.Mkdir(ctx, dstPath)
if err != nil {
- return errors.Wrap(err, "RenameDir mkdir")
+ return fmt.Errorf("RenameDir mkdir: %w", err)
}
}
@@ -2144,14 +2144,14 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
close(renames)
err = g.Wait()
if err != nil {
- return errors.Wrap(err, "RenameDir renames")
+ return fmt.Errorf("RenameDir renames: %w", err)
}
// Remove the source directories in reverse order
for i := len(dirs) - 1; i >= 0; i-- {
err := f.Rmdir(ctx, dirs[i])
if err != nil {
- return errors.Wrap(err, "RenameDir rmdir")
+ return fmt.Errorf("RenameDir rmdir: %w", err)
}
}
diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go
index 719f2e8c5..c1f826df9 100644
--- a/fs/operations/operations_test.go
+++ b/fs/operations/operations_test.go
@@ -23,6 +23,7 @@ import (
"bytes"
"context"
"crypto/rand"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -34,7 +35,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -1593,8 +1593,8 @@ func TestTouchDir(t *testing.T) {
err := operations.TouchDir(ctx, r.Fremote, timeValue, true)
require.NoError(t, err)
if accounting.Stats(ctx).GetErrors() != 0 {
- err = errors.Cause(accounting.Stats(ctx).GetLastError())
- require.True(t, err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete)
+ err = accounting.Stats(ctx).GetLastError()
+ require.True(t, errors.Is(err, fs.ErrorCantSetModTime) || errors.Is(err, fs.ErrorCantSetModTimeWithoutDelete))
} else {
file1.ModTime = timeValue
file2.ModTime = timeValue
diff --git a/fs/operations/rc.go b/fs/operations/rc.go
index 9b5723c38..22aa4ac74 100644
--- a/fs/operations/rc.go
+++ b/fs/operations/rc.go
@@ -2,6 +2,7 @@ package operations
import (
"context"
+ "fmt"
"io"
"mime"
"mime/multipart"
@@ -10,7 +11,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
@@ -140,15 +140,15 @@ func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) {
}
doAbout := f.Features().About
if doAbout == nil {
- return nil, errors.Errorf("%v doesn't support about", f)
+ return nil, fmt.Errorf("%v doesn't support about", f)
}
u, err := doAbout(ctx)
if err != nil {
- return nil, errors.Wrap(err, "about call failed")
+ return nil, fmt.Errorf("about call failed: %w", err)
}
err = rc.Reshape(&out, u)
if err != nil {
- return nil, errors.Wrap(err, "about Reshape failed")
+ return nil, fmt.Errorf("about Reshape failed: %w", err)
}
return out, nil
}
@@ -469,7 +469,7 @@ func rcFsInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) {
info := GetFsInfo(f)
err = rc.Reshape(&out, info)
if err != nil {
- return nil, errors.Wrap(err, "fsinfo Reshape failed")
+ return nil, fmt.Errorf("fsinfo Reshape failed: %w", err)
}
return out, nil
}
@@ -533,7 +533,7 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
}
doCommand := f.Features().Command
if doCommand == nil {
- return nil, errors.Errorf("%v: doesn't support backend commands", f)
+ return nil, fmt.Errorf("%v: doesn't support backend commands", f)
}
command, err := in.GetString("command")
if err != nil {
@@ -551,7 +551,7 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
}
result, err := doCommand(context.Background(), command, arg, opt)
if err != nil {
- return nil, errors.Wrapf(err, "command %q failed", command)
+ return nil, fmt.Errorf("command %q failed: %w", command, err)
}
out = make(rc.Params)
diff --git a/fs/operations/reopen.go b/fs/operations/reopen.go
index 688c5d057..1f575d7cc 100644
--- a/fs/operations/reopen.go
+++ b/fs/operations/reopen.go
@@ -2,10 +2,10 @@ package operations
import (
"context"
+ "errors"
"io"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
)
diff --git a/fs/operations/reopen_test.go b/fs/operations/reopen_test.go
index d7f7fbe94..3a75bc0bc 100644
--- a/fs/operations/reopen_test.go
+++ b/fs/operations/reopen_test.go
@@ -2,11 +2,11 @@ package operations
import (
"context"
+ "errors"
"io"
"io/ioutil"
"testing"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest/mockobject"
diff --git a/fs/rc/config.go b/fs/rc/config.go
index c996b3447..f0cc4503a 100644
--- a/fs/rc/config.go
+++ b/fs/rc/config.go
@@ -6,8 +6,8 @@ package rc
import (
"context"
+ "fmt"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
)
@@ -146,16 +146,16 @@ func rcOptionsSet(ctx context.Context, in Params) (out Params, err error) {
for name, options := range in {
current := optionBlock[name]
if current == nil {
- return nil, errors.Errorf("unknown option block %q", name)
+ return nil, fmt.Errorf("unknown option block %q", name)
}
err := Reshape(current, options)
if err != nil {
- return nil, errors.Wrapf(err, "failed to write options from block %q", name)
+ return nil, fmt.Errorf("failed to write options from block %q: %w", name, err)
}
if reload := optionReload[name]; reload != nil {
err = reload(ctx)
if err != nil {
- return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
+ return nil, fmt.Errorf("failed to reload options from block %q: %w", name, err)
}
}
}
diff --git a/fs/rc/config_test.go b/fs/rc/config_test.go
index 144edc352..01b25983c 100644
--- a/fs/rc/config_test.go
+++ b/fs/rc/config_test.go
@@ -3,10 +3,10 @@ package rc
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"testing"
- "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
diff --git a/fs/rc/internal.go b/fs/rc/internal.go
index 50e2765d3..10077ae09 100644
--- a/fs/rc/internal.go
+++ b/fs/rc/internal.go
@@ -4,6 +4,7 @@ package rc
import (
"context"
+ "fmt"
"net/http"
"os"
"os/exec"
@@ -12,7 +13,6 @@ import (
"time"
"github.com/coreos/go-semver/semver"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
@@ -60,7 +60,7 @@ Useful for testing error handling.`,
// Return an error regardless
func rcError(ctx context.Context, in Params) (out Params, err error) {
- return nil, errors.Errorf("arbitrary error on input %+v", in)
+ return nil, fmt.Errorf("arbitrary error on input %+v", in)
}
func init() {
@@ -422,7 +422,7 @@ func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
var httpResponse http.ResponseWriter
httpResponse, err = in.GetHTTPResponseWriter()
if err != nil {
- return nil, errors.Errorf("response object is required\n" + err.Error())
+ return nil, fmt.Errorf("response object is required\n%w", err)
}
var allArgs = []string{}
@@ -475,7 +475,7 @@ func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
cmd.Stdout = httpResponse
cmd.Stderr = httpResponse
} else {
- return nil, errors.Errorf("Unknown returnType %q", returnType)
+ return nil, fmt.Errorf("Unknown returnType %q", returnType)
}
err = cmd.Run()
diff --git a/fs/rc/jobs/job.go b/fs/rc/jobs/job.go
index 40a098400..e452fe241 100644
--- a/fs/rc/jobs/job.go
+++ b/fs/rc/jobs/job.go
@@ -4,13 +4,13 @@ package jobs
import (
"context"
+ "errors"
"fmt"
"runtime/debug"
"sync"
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
@@ -88,7 +88,7 @@ func (job *Job) removeListener(fn *func()) {
func (job *Job) run(ctx context.Context, fn rc.Func, in rc.Params) {
defer func() {
if r := recover(); r != nil {
- job.finish(nil, errors.Errorf("panic received: %v \n%s", r, string(debug.Stack())))
+ job.finish(nil, fmt.Errorf("panic received: %v \n%s", r, string(debug.Stack())))
}
}()
job.finish(fn(ctx, in))
@@ -352,7 +352,7 @@ func rcJobStatus(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
err = rc.Reshape(&out, job)
if err != nil {
- return nil, errors.Wrap(err, "reshape failed in job status")
+ return nil, fmt.Errorf("reshape failed in job status: %w", err)
}
return out, nil
}
diff --git a/fs/rc/jobs/job_test.go b/fs/rc/jobs/job_test.go
index 688ca04e8..3fcd4530b 100644
--- a/fs/rc/jobs/job_test.go
+++ b/fs/rc/jobs/job_test.go
@@ -2,11 +2,11 @@ package jobs
import (
"context"
+ "errors"
"runtime"
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
diff --git a/fs/rc/js/main.go b/fs/rc/js/main.go
index b5eb93cb7..eb536573e 100644
--- a/fs/rc/js/main.go
+++ b/fs/rc/js/main.go
@@ -10,12 +10,13 @@ package main
import (
"context"
"encoding/json"
+ "errors"
+ "fmt"
"log"
"net/http"
"runtime"
"syscall/js"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
@@ -54,10 +55,9 @@ func paramToValue(in rc.Params) (out js.Value) {
func errorValue(method string, in js.Value, err error) js.Value {
fs.Errorf(nil, "rc: %q: error: %v", method, err)
// Adjust the error return for some well known errors
- errOrig := errors.Cause(err)
status := http.StatusInternalServerError
switch {
- case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
+ case errors.Is(err, fs.ErrorDirNotFound) || errors.Is(err, fs.ErrorObjectNotFound):
status = http.StatusNotFound
case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
status = http.StatusBadRequest
@@ -89,7 +89,7 @@ func rcCallback(this js.Value, args []js.Value) interface{} {
inJSON := jsJSON.Call("stringify", inRaw).String()
err := json.Unmarshal([]byte(inJSON), &in)
if err != nil {
- return errorValue(method, inRaw, errors.Wrap(err, "couldn't unmarshal input"))
+ return errorValue(method, inRaw, fmt.Errorf("couldn't unmarshal input: %w", err))
}
default:
return errorValue(method, inRaw, errors.New("in parameter must be null or object"))
@@ -97,12 +97,12 @@ func rcCallback(this js.Value, args []js.Value) interface{} {
call := rc.Calls.Get(method)
if call == nil {
- return errorValue(method, inRaw, errors.Errorf("method %q not found", method))
+ return errorValue(method, inRaw, fmt.Errorf("method %q not found", method))
}
out, err := call.Fn(ctx, in)
if err != nil {
- return errorValue(method, inRaw, errors.Wrap(err, "method call failed"))
+ return errorValue(method, inRaw, fmt.Errorf("method call failed: %w", err))
}
if out == nil {
return nil
@@ -110,7 +110,7 @@ func rcCallback(this js.Value, args []js.Value) interface{} {
var out2 map[string]interface{}
err = rc.Reshape(&out2, out)
if err != nil {
- return errorValue(method, inRaw, errors.Wrap(err, "result reshape failed"))
+ return errorValue(method, inRaw, fmt.Errorf("result reshape failed: %w", err))
}
return js.ValueOf(out2)
diff --git a/fs/rc/params.go b/fs/rc/params.go
index 23b8a64d8..a2eabb6f4 100644
--- a/fs/rc/params.go
+++ b/fs/rc/params.go
@@ -4,14 +4,13 @@ package rc
import (
"encoding/json"
+ "errors"
"fmt"
"math"
"net/http"
"strconv"
"time"
- "github.com/pkg/errors"
-
"github.com/rclone/rclone/fs"
)
@@ -75,11 +74,11 @@ func IsErrParamInvalid(err error) bool {
func Reshape(out interface{}, in interface{}) error {
b, err := json.Marshal(in)
if err != nil {
- return errors.Wrapf(err, "Reshape failed to Marshal")
+ return fmt.Errorf("Reshape failed to Marshal: %w", err)
}
err = json.Unmarshal(b, out)
if err != nil {
- return errors.Wrapf(err, "Reshape failed to Unmarshal")
+ return fmt.Errorf("Reshape failed to Unmarshal: %w", err)
}
return nil
}
@@ -117,7 +116,7 @@ func (p Params) GetHTTPRequest() (*http.Request, error) {
}
request, ok := value.(*http.Request)
if !ok {
- return nil, ErrParamInvalid{errors.Errorf("expecting http.request value for key %q (was %T)", key, value)}
+ return nil, ErrParamInvalid{fmt.Errorf("expecting http.request value for key %q (was %T)", key, value)}
}
return request, nil
}
@@ -134,7 +133,7 @@ func (p Params) GetHTTPResponseWriter() (http.ResponseWriter, error) {
}
request, ok := value.(http.ResponseWriter)
if !ok {
- return nil, ErrParamInvalid{errors.Errorf("expecting http.ResponseWriter value for key %q (was %T)", key, value)}
+ return nil, ErrParamInvalid{fmt.Errorf("expecting http.ResponseWriter value for key %q (was %T)", key, value)}
}
return request, nil
}
@@ -150,7 +149,7 @@ func (p Params) GetString(key string) (string, error) {
}
str, ok := value.(string)
if !ok {
- return "", ErrParamInvalid{errors.Errorf("expecting string value for key %q (was %T)", key, value)}
+ return "", ErrParamInvalid{fmt.Errorf("expecting string value for key %q (was %T)", key, value)}
}
return str, nil
}
@@ -171,17 +170,17 @@ func (p Params) GetInt64(key string) (int64, error) {
return x, nil
case float64:
if x > math.MaxInt64 || x < math.MinInt64 {
- return 0, ErrParamInvalid{errors.Errorf("key %q (%v) overflows int64 ", key, value)}
+ return 0, ErrParamInvalid{fmt.Errorf("key %q (%v) overflows int64 ", key, value)}
}
return int64(x), nil
case string:
i, err := strconv.ParseInt(x, 10, 0)
if err != nil {
- return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as int64", key, value)}
+ return 0, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as int64: %w", key, value, err)}
}
return i, nil
}
- return 0, ErrParamInvalid{errors.Errorf("expecting int64 value for key %q (was %T)", key, value)}
+ return 0, ErrParamInvalid{fmt.Errorf("expecting int64 value for key %q (was %T)", key, value)}
}
// GetFloat64 gets a float64 parameter from the input
@@ -203,11 +202,11 @@ func (p Params) GetFloat64(key string) (float64, error) {
case string:
f, err := strconv.ParseFloat(x, 64)
if err != nil {
- return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as float64", key, value)}
+ return 0, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as float64: %w", key, value, err)}
}
return f, nil
}
- return 0, ErrParamInvalid{errors.Errorf("expecting float64 value for key %q (was %T)", key, value)}
+ return 0, ErrParamInvalid{fmt.Errorf("expecting float64 value for key %q (was %T)", key, value)}
}
// GetBool gets a boolean parameter from the input
@@ -231,11 +230,11 @@ func (p Params) GetBool(key string) (bool, error) {
case string:
b, err := strconv.ParseBool(x)
if err != nil {
- return false, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as bool", key, value)}
+ return false, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as bool: %w", key, value, err)}
}
return b, nil
}
- return false, ErrParamInvalid{errors.Errorf("expecting bool value for key %q (was %T)", key, value)}
+ return false, ErrParamInvalid{fmt.Errorf("expecting bool value for key %q (was %T)", key, value)}
}
// GetStruct gets a struct from key from the input into the struct
@@ -257,7 +256,7 @@ func (p Params) GetStruct(key string, out interface{}) error {
return nil
}
}
- return ErrParamInvalid{errors.Wrapf(err, "key %q", key)}
+ return ErrParamInvalid{fmt.Errorf("key %q: %w", key, err)}
}
return nil
}
@@ -280,7 +279,7 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
}
duration, err := fs.ParseDuration(s)
if err != nil {
- return 0, ErrParamInvalid{errors.Wrap(err, "parse duration")}
+ return 0, ErrParamInvalid{fmt.Errorf("parse duration: %w", err)}
}
return duration, nil
}
@@ -292,9 +291,8 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
// It returns a Params and an updated status code
func Error(path string, in Params, err error, status int) (Params, int) {
// Adjust the status code for some well known errors
- errOrig := errors.Cause(err)
switch {
- case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
+ case errors.Is(err, fs.ErrorDirNotFound) || errors.Is(err, fs.ErrorObjectNotFound):
status = http.StatusNotFound
case IsErrParamInvalid(err) || IsErrParamNotFound(err):
status = http.StatusBadRequest
diff --git a/fs/rc/params_test.go b/fs/rc/params_test.go
index 3006f08ce..26f3153d9 100644
--- a/fs/rc/params_test.go
+++ b/fs/rc/params_test.go
@@ -1,13 +1,13 @@
package rc
import (
+ "errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/fs/rc/rcserver/rcserver.go b/fs/rc/rcserver/rcserver.go
index 39498e388..3d7a79a5c 100644
--- a/fs/rc/rcserver/rcserver.go
+++ b/fs/rc/rcserver/rcserver.go
@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/rc/webgui"
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/skratchdot/open-golang/open"
@@ -138,7 +137,7 @@ func (s *Server) Serve() error {
if s.files != nil {
openURL, err := url.Parse(s.URL())
if err != nil {
- return errors.Wrap(err, "invalid serving URL")
+ return fmt.Errorf("invalid serving URL: %w", err)
}
// Add username, password into the URL if they are set
user, pass := s.opt.HTTPOptions.BasicUser, s.opt.HTTPOptions.BasicPass
@@ -211,7 +210,7 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
case "GET", "HEAD":
s.handleGet(w, r, path)
default:
- writeError(path, nil, w, errors.Errorf("method %q not allowed", r.Method), http.StatusMethodNotAllowed)
+ writeError(path, nil, w, fmt.Errorf("method %q not allowed", r.Method), http.StatusMethodNotAllowed)
return
}
}
@@ -225,7 +224,7 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
// Parse the POST and URL parameters into r.Form, for others r.Form will be empty value
err := r.ParseForm()
if err != nil {
- writeError(path, nil, w, errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
+ writeError(path, nil, w, fmt.Errorf("failed to parse form/URL parameters: %w", err), http.StatusBadRequest)
return
}
values = r.Form
@@ -243,20 +242,20 @@ func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string)
if contentType == "application/json" {
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
- writeError(path, in, w, errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
+ writeError(path, in, w, fmt.Errorf("failed to read input JSON: %w", err), http.StatusBadRequest)
return
}
}
// Find the call
call := rc.Calls.Get(path)
if call == nil {
- writeError(path, in, w, errors.Errorf("couldn't find method %q", path), http.StatusNotFound)
+ writeError(path, in, w, fmt.Errorf("couldn't find method %q", path), http.StatusNotFound)
return
}
// Check to see if it requires authorisation
if !s.opt.NoAuth && call.AuthRequired && !s.UsingAuth() {
- writeError(path, in, w, errors.Errorf("authentication must be set up on the rc server to use %q or the --rc-no-auth flag must be in use", path), http.StatusForbidden)
+ writeError(path, in, w, fmt.Errorf("authentication must be set up on the rc server to use %q or the --rc-no-auth flag must be in use", path), http.StatusForbidden)
return
}
@@ -317,14 +316,14 @@ func (s *Server) serveRoot(w http.ResponseWriter, r *http.Request) {
func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string, fsName string) {
f, err := cache.Get(s.ctx, fsName)
if err != nil {
- writeError(path, nil, w, errors.Wrap(err, "failed to make Fs"), http.StatusInternalServerError)
+ writeError(path, nil, w, fmt.Errorf("failed to make Fs: %w", err), http.StatusInternalServerError)
return
}
if path == "" || strings.HasSuffix(path, "/") {
path = strings.Trim(path, "/")
entries, err := list.DirSorted(r.Context(), f, false, path)
if err != nil {
- writeError(path, nil, w, errors.Wrap(err, "failed to list directory"), http.StatusInternalServerError)
+ writeError(path, nil, w, fmt.Errorf("failed to list directory: %w", err), http.StatusInternalServerError)
return
}
// Make the entries for display
@@ -343,7 +342,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string
path = strings.Trim(path, "/")
o, err := f.NewObject(r.Context(), path)
if err != nil {
- writeError(path, nil, w, errors.Wrap(err, "failed to find object"), http.StatusInternalServerError)
+ writeError(path, nil, w, fmt.Errorf("failed to find object: %w", err), http.StatusInternalServerError)
return
}
serve.Object(w, r, o)
diff --git a/fs/rc/webgui/plugins.go b/fs/rc/webgui/plugins.go
index 7bc22be54..3a97fd679 100644
--- a/fs/rc/webgui/plugins.go
+++ b/fs/rc/webgui/plugins.go
@@ -2,6 +2,7 @@ package webgui
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -16,7 +17,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/rc/rcflags"
- "github.com/rclone/rclone/lib/errors"
)
// PackageJSON is the structure of package.json of a plugin
diff --git a/fs/rc/webgui/webgui.go b/fs/rc/webgui/webgui.go
index a272d73e6..ea37382ee 100644
--- a/fs/rc/webgui/webgui.go
+++ b/fs/rc/webgui/webgui.go
@@ -5,6 +5,7 @@ package webgui
import (
"archive/zip"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -15,7 +16,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/file"
)
@@ -24,15 +24,15 @@ import (
func GetLatestReleaseURL(fetchURL string) (string, string, int, error) {
resp, err := http.Get(fetchURL)
if err != nil {
- return "", "", 0, errors.Wrap(err, "failed getting latest release of rclone-webui")
+ return "", "", 0, fmt.Errorf("failed getting latest release of rclone-webui: %w", err)
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
- return "", "", 0, errors.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, fetchURL)
+ return "", "", 0, fmt.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, fetchURL)
}
results := gitHubRequest{}
if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
- return "", "", 0, errors.Wrap(err, "could not decode results from http request")
+ return "", "", 0, fmt.Errorf("could not decode results from http request: %w", err)
}
if len(results.Assets) < 1 {
return "", "", 0, errors.New("could not find an asset in the release. " +
@@ -63,7 +63,7 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
// Get the latest release details
WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
if err != nil {
- return errors.Wrap(err, "Error checking for web gui release update, skipping update")
+ return fmt.Errorf("Error checking for web gui release update, skipping update: %w", err)
}
dat, err := ioutil.ReadFile(tagPath)
tagsMatch := false
@@ -150,7 +150,7 @@ func DownloadFile(filepath string, url string) (err error) {
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
- return errors.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, url)
+ return fmt.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, url)
}
// Create the file
diff --git a/fs/registry.go b/fs/registry.go
index 24b4772bf..7ca447372 100644
--- a/fs/registry.go
+++ b/fs/registry.go
@@ -11,7 +11,6 @@ import (
"sort"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
)
@@ -260,7 +259,7 @@ func Find(name string) (*RegInfo, error) {
return item, nil
}
}
- return nil, errors.Errorf("didn't find backend called %q", name)
+ return nil, fmt.Errorf("didn't find backend called %q", name)
}
// MustFind looks for an Info object for the type name passed in
diff --git a/fs/sizesuffix.go b/fs/sizesuffix.go
index eda3c2368..04056b707 100644
--- a/fs/sizesuffix.go
+++ b/fs/sizesuffix.go
@@ -3,13 +3,12 @@ package fs
// SizeSuffix is parsed by flag with K/M/G binary suffixes
import (
"encoding/json"
+ "errors"
"fmt"
"math"
"sort"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
// SizeSuffix is an int64 with a friendly way of printing setting
@@ -153,7 +152,7 @@ func (x *SizeSuffix) Set(s string) error {
suffix = s[len(s)-3]
suffixLen = 3
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
- return errors.Errorf("bad suffix %q", suffix)
+ return fmt.Errorf("bad suffix %q", suffix)
}
// Could also support SI form MB, and treat it equivalent to MiB, but perhaps better to reserve it for CountSuffix?
//} else if len(s) > 1 {
@@ -172,11 +171,11 @@ func (x *SizeSuffix) Set(s string) error {
multiplierFound, multiplier = x.multiplierFromSymbol(suffix)
}
if !multiplierFound {
- return errors.Errorf("bad suffix %q", suffix)
+ return fmt.Errorf("bad suffix %q", suffix)
}
default:
if multiplierFound, multiplier = x.multiplierFromSymbol(suffix); !multiplierFound {
- return errors.Errorf("bad suffix %q", suffix)
+ return fmt.Errorf("bad suffix %q", suffix)
}
}
s = s[:len(s)-suffixLen]
@@ -185,7 +184,7 @@ func (x *SizeSuffix) Set(s string) error {
return err
}
if value < 0 {
- return errors.Errorf("size can't be negative %q", s)
+ return fmt.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
diff --git a/fs/sync/pipe.go b/fs/sync/pipe.go
index d28527e7b..a2aa701bc 100644
--- a/fs/sync/pipe.go
+++ b/fs/sync/pipe.go
@@ -2,13 +2,13 @@ package sync
import (
"context"
+ "fmt"
"math/bits"
"strconv"
"strings"
"sync"
"github.com/aalpar/deheap"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
)
@@ -201,7 +201,7 @@ func newLess(orderBy string) (less lessFn, fraction int, err error) {
return a.Src.ModTime(ctx).Before(b.Src.ModTime(ctx))
}
default:
- return nil, fraction, errors.Errorf("unknown --order-by comparison %q", parts[0])
+ return nil, fraction, fmt.Errorf("unknown --order-by comparison %q", parts[0])
}
descending := false
if len(parts) > 1 {
@@ -214,16 +214,16 @@ func newLess(orderBy string) (less lessFn, fraction int, err error) {
if len(parts) > 2 {
fraction, err = strconv.Atoi(parts[2])
if err != nil {
- return nil, fraction, errors.Errorf("bad mixed fraction --order-by %q", parts[2])
+ return nil, fraction, fmt.Errorf("bad mixed fraction --order-by %q", parts[2])
}
}
default:
- return nil, fraction, errors.Errorf("unknown --order-by sort direction %q", parts[1])
+ return nil, fraction, fmt.Errorf("unknown --order-by sort direction %q", parts[1])
}
}
if (fraction >= 0 && len(parts) > 3) || (fraction < 0 && len(parts) > 2) {
- return nil, fraction, errors.Errorf("bad --order-by string %q", orderBy)
+ return nil, fraction, fmt.Errorf("bad --order-by string %q", orderBy)
}
if descending {
oldLess := less
diff --git a/fs/sync/sync.go b/fs/sync/sync.go
index 96290e0a1..4c9e0f959 100644
--- a/fs/sync/sync.go
+++ b/fs/sync/sync.go
@@ -3,6 +3,7 @@ package sync
import (
"context"
+ "errors"
"fmt"
"path"
"sort"
@@ -10,7 +11,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
@@ -645,7 +645,7 @@ func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy
case "size":
// ignore
default:
- return strategy, errors.Errorf("unknown track renames strategy %q", s)
+ return strategy, fmt.Errorf("unknown track renames strategy %q", s)
}
}
return strategy, nil
diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go
index c5724a9cc..d3f2e66b9 100644
--- a/fs/sync/sync_test.go
+++ b/fs/sync/sync_test.go
@@ -4,13 +4,13 @@ package sync
import (
"context"
+ "errors"
"fmt"
"runtime"
"strings"
"testing"
"time"
- "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -1059,7 +1059,7 @@ func TestSyncWithMaxDuration(t *testing.T) {
accounting.GlobalStats().ResetCounters()
startTime := time.Now()
err := Sync(ctx, r.Fremote, r.Flocal, false)
- require.Equal(t, context.DeadlineExceeded, errors.Cause(err))
+ require.True(t, errors.Is(err, context.DeadlineExceeded))
elapsed := time.Since(startTime)
maxTransferTime := (time.Duration(len(testFiles)) * 60 * time.Second) / time.Duration(bytesPerSecond)
@@ -2091,7 +2091,7 @@ func testSyncConcurrent(t *testing.T, subtest string) {
fstest.CheckItems(t, r.Fremote, itemsBefore...)
stats.ResetErrors()
err := Sync(ctx, r.Fremote, r.Flocal, false)
- if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
+ if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Skipf("Skip test because remote cannot upload empty files")
}
assert.NoError(t, err, "Sync must not return a error")
diff --git a/fs/tristate.go b/fs/tristate.go
index d35980e03..d03926a07 100644
--- a/fs/tristate.go
+++ b/fs/tristate.go
@@ -5,8 +5,6 @@ import (
"fmt"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
// Tristate is a boolean that can has the states, true, false and
@@ -36,7 +34,7 @@ func (t *Tristate) Set(s string) error {
}
value, err := strconv.ParseBool(s)
if err != nil {
- return errors.Wrapf(err, "failed to parse Tristate %q", s)
+ return fmt.Errorf("failed to parse Tristate %q: %w", s, err)
}
t.Value = value
t.Valid = true
diff --git a/fs/walk/walk.go b/fs/walk/walk.go
index f3d76f298..56418b522 100644
--- a/fs/walk/walk.go
+++ b/fs/walk/walk.go
@@ -3,13 +3,14 @@ package walk
import (
"context"
+ "errors"
+ "fmt"
"path"
"sort"
"strings"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/filter"
@@ -244,7 +245,7 @@ func (dm *dirMap) addEntries(entries fs.DirEntries) error {
case fs.Directory:
dm.add(x.Remote(), true)
default:
- return errors.Errorf("unknown object type %T", entry)
+ return fmt.Errorf("unknown object type %T", entry)
}
}
return nil
@@ -315,7 +316,7 @@ func listR(ctx context.Context, f fs.Fs, path string, includeAll bool, listType
return err
}
default:
- return errors.Errorf("unknown object type %T", entry)
+ return fmt.Errorf("unknown object type %T", entry)
}
if include {
filteredEntries = append(filteredEntries, entry)
@@ -514,7 +515,7 @@ func walkRDirTree(ctx context.Context, f fs.Fs, startPath string, includeAll boo
fs.Debugf(x, "Excluded from sync (and deletion)")
}
default:
- return errors.Errorf("unknown object type %T", entry)
+ return fmt.Errorf("unknown object type %T", entry)
}
}
return nil
diff --git a/fs/walk/walk_test.go b/fs/walk/walk_test.go
index c8da2e7a3..79ccbf373 100644
--- a/fs/walk/walk_test.go
+++ b/fs/walk/walk_test.go
@@ -2,13 +2,13 @@ package walk
import (
"context"
+ "errors"
"fmt"
"io"
"strings"
"sync"
"testing"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
_ "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go
index 2fb0ba4f4..3c9984926 100644
--- a/fstest/fstests/fstests.go
+++ b/fstest/fstests/fstests.go
@@ -9,6 +9,7 @@ package fstests
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -23,7 +24,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fserrors"
@@ -874,7 +874,7 @@ func Run(t *testing.T, opt *Opt) {
var objNames, dirNames []string
for i := 1; i <= *fstest.ListRetries; i++ {
objs, dirs, err := walk.GetAll(ctx, f, dir, true, 1)
- if errors.Cause(err) == fs.ErrorDirNotFound {
+ if errors.Is(err, fs.ErrorDirNotFound) {
objs, dirs, err = walk.GetAll(ctx, f, dir, true, 1)
}
require.NoError(t, err)
@@ -1223,7 +1223,7 @@ func Run(t *testing.T, opt *Opt) {
// check remotes
// remote should not exist here
_, err = f.List(ctx, "")
- assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err))
+ assert.True(t, errors.Is(err, fs.ErrorDirNotFound))
//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
file1Copy := file1
file1Copy.Path = path.Join(newName, file1.Path)
@@ -1618,7 +1618,7 @@ func Run(t *testing.T, opt *Opt) {
// sharing directory for the first time
path := path.Dir(file2.Path)
link3, err := doPublicLink(ctx, path, expiry, false)
- if err != nil && (errors.Cause(err) == fs.ErrorCantShareDirectories || errors.Cause(err) == fs.ErrorObjectNotFound) {
+ if err != nil && (errors.Is(err, fs.ErrorCantShareDirectories) || errors.Is(err, fs.ErrorObjectNotFound)) {
t.Log("skipping directory tests as not supported on this backend")
} else {
require.NoError(t, err)
@@ -1953,7 +1953,7 @@ func Run(t *testing.T, opt *Opt) {
// Purge the folder
err = operations.Purge(ctx, f, "")
- if errors.Cause(err) != fs.ErrorDirNotFound {
+ if !errors.Is(err, fs.ErrorDirNotFound) {
require.NoError(t, err)
}
purged = true
@@ -1963,7 +1963,7 @@ func Run(t *testing.T, opt *Opt) {
if !isBucketBasedButNotRoot(f) {
err = operations.Purge(ctx, f, "")
assert.Error(t, err, "Expecting error after on second purge")
- if errors.Cause(err) != fs.ErrorDirNotFound {
+ if !errors.Is(err, fs.ErrorDirNotFound) {
t.Log("Warning: this should produce fs.ErrorDirNotFound")
}
}
diff --git a/fstest/test_all/clean.go b/fstest/test_all/clean.go
index 7cda57093..c4d22a8a4 100644
--- a/fstest/test_all/clean.go
+++ b/fstest/test_all/clean.go
@@ -4,10 +4,10 @@ package main
import (
"context"
+ "fmt"
"log"
"regexp"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
@@ -48,14 +48,14 @@ func cleanFs(ctx context.Context, remote string, cleanup bool) error {
log.Printf("Purging %s", fullPath)
dir, err := fs.NewFs(context.Background(), fullPath)
if err != nil {
- err = errors.Wrap(err, "NewFs failed")
+ err = fmt.Errorf("NewFs failed: %w", err)
lastErr = err
fs.Errorf(fullPath, "%v", err)
return nil
}
err = operations.Purge(ctx, dir, "")
if err != nil {
- err = errors.Wrap(err, "Purge failed")
+ err = fmt.Errorf("Purge failed: %w", err)
lastErr = err
fs.Errorf(dir, "%v", err)
return nil
diff --git a/fstest/test_all/config.go b/fstest/test_all/config.go
index 31a55a2a5..62d8c057c 100644
--- a/fstest/test_all/config.go
+++ b/fstest/test_all/config.go
@@ -3,11 +3,11 @@
package main
import (
+ "fmt"
"io/ioutil"
"log"
"path"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
yaml "gopkg.in/yaml.v2"
)
@@ -112,12 +112,12 @@ type Config struct {
func NewConfig(configFile string) (*Config, error) {
d, err := ioutil.ReadFile(configFile)
if err != nil {
- return nil, errors.Wrap(err, "failed to read config file")
+ return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
- return nil, errors.Wrap(err, "failed to parse config file")
+ return nil, fmt.Errorf("failed to parse config file: %w", err)
}
// d, err = yaml.Marshal(&config)
// if err != nil {
diff --git a/fstest/testserver/testserver.go b/fstest/testserver/testserver.go
index bdcf05131..005096e85 100644
--- a/fstest/testserver/testserver.go
+++ b/fstest/testserver/testserver.go
@@ -3,6 +3,7 @@ package testserver
import (
"bytes"
+ "errors"
"fmt"
"net"
"os"
@@ -13,7 +14,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fspath"
)
@@ -52,7 +52,7 @@ func run(name, command string) (out []byte, err error) {
cmd := exec.Command(cmdPath, command)
out, err = cmd.CombinedOutput()
if err != nil {
- err = errors.Wrapf(err, "failed to run %s %s\n%s", cmdPath, command, string(out))
+ err = fmt.Errorf("failed to run %s %s\n%s: %w", cmdPath, command, string(out), err)
}
return out, err
}
@@ -112,7 +112,7 @@ func start(name string) error {
}
time.Sleep(time.Second)
}
- return errors.Errorf("failed to connect to %q on %q", name, connect)
+ return fmt.Errorf("failed to connect to %q on %q", name, connect)
}
// Start starts the named test server which can be stopped by the
diff --git a/go.mod b/go.mod
index b8c0219cf..74f897418 100644
--- a/go.mod
+++ b/go.mod
@@ -50,7 +50,6 @@ require (
github.com/ncw/swift/v2 v2.0.1
github.com/nsf/termbox-go v1.1.1
github.com/patrickmn/go-cache v2.1.0+incompatible
- github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.2
github.com/pmezard/go-difflib v1.0.0
github.com/prometheus/client_golang v1.11.0
diff --git a/lib/daemonize/daemon_other.go b/lib/daemonize/daemon_other.go
index 70e4a6df5..788d034e0 100644
--- a/lib/daemonize/daemon_other.go
+++ b/lib/daemonize/daemon_other.go
@@ -6,13 +6,12 @@
package daemonize
import (
+ "fmt"
"os"
"runtime"
-
- "github.com/pkg/errors"
)
// StartDaemon runs background twin of current process.
func StartDaemon(args []string) (*os.Process, error) {
- return nil, errors.Errorf("background mode is not supported on %s platform", runtime.GOOS)
+ return nil, fmt.Errorf("background mode is not supported on %s platform", runtime.GOOS)
}
diff --git a/lib/dircache/dircache.go b/lib/dircache/dircache.go
index a8c9d3a6f..05766335d 100644
--- a/lib/dircache/dircache.go
+++ b/lib/dircache/dircache.go
@@ -7,12 +7,12 @@ package dircache
import (
"bytes"
"context"
+ "errors"
"fmt"
"path"
"strings"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -241,7 +241,7 @@ func (dc *DirCache) _findDir(ctx context.Context, path string, create bool) (pat
if create {
pathID, err = dc.fs.CreateDir(ctx, parentPathID, leaf)
if err != nil {
- return "", errors.Wrap(err, "failed to make directory")
+ return "", fmt.Errorf("failed to make directory: %w", err)
}
} else {
return "", fs.ErrorDirNotFound
diff --git a/lib/errors/errors.go b/lib/errors/errors.go
index 6338440c4..2d9c6b93f 100644
--- a/lib/errors/errors.go
+++ b/lib/errors/errors.go
@@ -1,22 +1,9 @@
package errors
import (
- "errors"
- "fmt"
"reflect"
)
-// New returns an error that formats as the given text.
-func New(text string) error {
- return errors.New(text)
-}
-
-// Errorf formats according to a format specifier and returns the string
-// as a value that satisfies error.
-func Errorf(format string, a ...interface{}) error {
- return fmt.Errorf(format, a...)
-}
-
// WalkFunc is the signature of the Walk callback function. The function gets the
// current error in the chain and should return true if the chain processing
// should be aborted.
@@ -27,7 +14,5 @@ type WalkFunc func(error) bool
// is stopped and no further calls will be made.
//
// The next error in the chain is determined by the following rules:
-// - If the current error has a `Cause() error` method (github.com/pkg/errors),
-// the return value of this method is used.
// - If the current error has a `Unwrap() error` method (golang.org/x/xerrors),
// the return value of this method is used.
diff --git a/lib/errors/errors_test.go b/lib/errors/errors_test.go
index 5ef92c683..c208a5a2e 100644
--- a/lib/errors/errors_test.go
+++ b/lib/errors/errors_test.go
@@ -1,12 +1,13 @@
package errors_test
import (
+ "errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
- "github.com/rclone/rclone/lib/errors"
+ liberrors "github.com/rclone/rclone/lib/errors"
)
func TestWalk(t *testing.T) {
@@ -43,7 +44,7 @@ func TestWalk(t *testing.T) {
} {
var last error
calls := 0
- errors.Walk(test.err, func(err error) bool {
+ liberrors.Walk(test.err, func(err error) bool {
calls++
last = err
_, stop := err.(stopError)
diff --git a/lib/file/preallocate_windows.go b/lib/file/preallocate_windows.go
index afa33fe4b..49352356a 100644
--- a/lib/file/preallocate_windows.go
+++ b/lib/file/preallocate_windows.go
@@ -4,12 +4,12 @@
package file
import (
+ "fmt"
"os"
"sync"
"syscall"
"unsafe"
- "github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@@ -63,13 +63,13 @@ func PreAllocate(size int64, out *os.File) error {
uintptr(3), // FileFsSizeInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
- return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
+ return fmt.Errorf("preAllocate NtQueryVolumeInformationFile failed: %w", e1)
}
// Calculate the allocation size
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
if clusterSize <= 0 {
- return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
+ return fmt.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
}
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
@@ -85,7 +85,7 @@ func PreAllocate(size int64, out *os.File) error {
if e1 == syscall.Errno(windows.ERROR_DISK_FULL) || e1 == syscall.Errno(windows.ERROR_HANDLE_DISK_FULL) {
return ErrDiskFull
}
- return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
+ return fmt.Errorf("preAllocate NtSetInformationFile failed: %w", e1)
}
return nil
@@ -104,7 +104,7 @@ func SetSparse(out *os.File) error {
var bytesReturned uint32
err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, &bytesReturned, nil)
if err != nil {
- return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE")
+ return fmt.Errorf("DeviceIoControl FSCTL_SET_SPARSE: %w", err)
}
return nil
}
diff --git a/lib/http/http.go b/lib/http/http.go
index abf3b76d4..d6ff1452a 100644
--- a/lib/http/http.go
+++ b/lib/http/http.go
@@ -5,6 +5,7 @@ import (
"context"
"crypto/tls"
"crypto/x509"
+ "errors"
"fmt"
"io/ioutil"
"log"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/go-chi/chi/v5"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
diff --git a/lib/jwtutil/jwtutil.go b/lib/jwtutil/jwtutil.go
index f99b36b97..8473c45bd 100644
--- a/lib/jwtutil/jwtutil.go
+++ b/lib/jwtutil/jwtutil.go
@@ -6,13 +6,14 @@ import (
"crypto/rsa"
"encoding/hex"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/lib/oauthutil"
@@ -34,11 +35,11 @@ func RandomHex(n int) (string, error) {
func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryParams map[string]string, privateKey *rsa.PrivateKey, m configmap.Mapper, client *http.Client) (err error) {
payload, err := jws.Encode(header, claims, privateKey)
if err != nil {
- return errors.Wrap(err, "jwtutil: failed to encode payload")
+ return fmt.Errorf("jwtutil: failed to encode payload: %w", err)
}
req, err := http.NewRequest("POST", claims.Aud, nil)
if err != nil {
- return errors.Wrap(err, "jwtutil: failed to create new request")
+ return fmt.Errorf("jwtutil: failed to create new request: %w", err)
}
q := req.URL.Query()
q.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer")
@@ -50,13 +51,13 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara
req, err = http.NewRequest("POST", claims.Aud, bytes.NewBuffer([]byte(queryString)))
if err != nil {
- return errors.Wrap(err, "jwtutil: failed to create new request")
+ return fmt.Errorf("jwtutil: failed to create new request: %w", err)
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := client.Do(req)
if err != nil {
- return errors.Wrap(err, "jwtutil: failed making auth request")
+ return fmt.Errorf("jwtutil: failed making auth request: %w", err)
}
s, err := bodyToString(resp.Body)
@@ -65,12 +66,12 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara
}
if resp.StatusCode != 200 {
err = errors.New(resp.Status)
- return errors.Wrap(err, "jwtutil: failed making auth request")
+ return fmt.Errorf("jwtutil: failed making auth request: %w", err)
}
defer func() {
deferedErr := resp.Body.Close()
if deferedErr != nil {
- err = errors.Wrap(err, "jwtutil: failed to close resp.Body")
+ err = fmt.Errorf("jwtutil: failed to close resp.Body: %w", err)
}
}()
@@ -80,7 +81,7 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara
err = errors.New("No AccessToken in Response")
}
if err != nil {
- return errors.Wrap(err, "jwtutil: failed to get token")
+ return fmt.Errorf("jwtutil: failed to get token: %w", err)
}
token := &oauth2.Token{
AccessToken: result.AccessToken,
diff --git a/lib/kv/bolt.go b/lib/kv/bolt.go
index 4b0efc7dc..5f9f3d7a6 100644
--- a/lib/kv/bolt.go
+++ b/lib/kv/bolt.go
@@ -5,13 +5,13 @@ package kv
import (
"context"
+ "fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
@@ -101,7 +101,7 @@ func Start(ctx context.Context, facility string, f fs.Fs) (*DB, error) {
}
if err = db.open(ctx, false); err != nil && err != ErrEmpty {
- return nil, errors.Wrapf(err, "cannot open db: %s", db.path)
+ return nil, fmt.Errorf("cannot open db: %s: %w", db.path, err)
}
dbMap[name] = db
diff --git a/lib/kv/types.go b/lib/kv/types.go
index f21957976..601dd21da 100644
--- a/lib/kv/types.go
+++ b/lib/kv/types.go
@@ -2,8 +2,7 @@ package kv
import (
"context"
-
- "github.com/pkg/errors"
+ "errors"
)
// package errors
diff --git a/lib/mmap/mmap_unix.go b/lib/mmap/mmap_unix.go
index f5c7f40c2..be7cf195c 100644
--- a/lib/mmap/mmap_unix.go
+++ b/lib/mmap/mmap_unix.go
@@ -7,7 +7,8 @@
package mmap
import (
- "github.com/pkg/errors"
+ "fmt"
+
"golang.org/x/sys/unix"
)
@@ -17,7 +18,7 @@ import (
func Alloc(size int) ([]byte, error) {
mem, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
if err != nil {
- return nil, errors.Wrap(err, "mmap: failed to allocate memory for buffer")
+ return nil, fmt.Errorf("mmap: failed to allocate memory for buffer: %w", err)
}
return mem, nil
}
@@ -28,7 +29,7 @@ func Alloc(size int) ([]byte, error) {
func Free(mem []byte) error {
err := unix.Munmap(mem)
if err != nil {
- return errors.Wrap(err, "mmap: failed to unmap memory")
+ return fmt.Errorf("mmap: failed to unmap memory: %w", err)
}
return nil
}
diff --git a/lib/mmap/mmap_windows.go b/lib/mmap/mmap_windows.go
index ce57aadef..5c45ac89b 100644
--- a/lib/mmap/mmap_windows.go
+++ b/lib/mmap/mmap_windows.go
@@ -7,10 +7,10 @@
package mmap
import (
+ "fmt"
"reflect"
"unsafe"
- "github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@@ -20,7 +20,7 @@ import (
func Alloc(size int) ([]byte, error) {
p, err := windows.VirtualAlloc(0, uintptr(size), windows.MEM_COMMIT, windows.PAGE_READWRITE)
if err != nil {
- return nil, errors.Wrap(err, "mmap: failed to allocate memory for buffer")
+ return nil, fmt.Errorf("mmap: failed to allocate memory for buffer: %w", err)
}
var mem []byte
sh := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
@@ -37,7 +37,7 @@ func Free(mem []byte) error {
sh := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
err := windows.VirtualFree(sh.Data, 0, windows.MEM_RELEASE)
if err != nil {
- return errors.Wrap(err, "mmap: failed to unmap memory")
+ return fmt.Errorf("mmap: failed to unmap memory: %w", err)
}
return nil
}
diff --git a/lib/oauthutil/oauthutil.go b/lib/oauthutil/oauthutil.go
index fba29a937..2203a0e47 100644
--- a/lib/oauthutil/oauthutil.go
+++ b/lib/oauthutil/oauthutil.go
@@ -3,6 +3,7 @@ package oauthutil
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"html/template"
"net"
@@ -12,7 +13,6 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -110,7 +110,7 @@ type oldToken struct {
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
tokenString, ok := m.Get(config.ConfigToken)
if !ok || tokenString == "" {
- return nil, errors.Errorf("empty token found - please run \"rclone config reconnect %s:\"", name)
+ return nil, fmt.Errorf("empty token found - please run \"rclone config reconnect %s:\"", name)
}
token := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), token)
@@ -245,7 +245,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
time.Sleep(1 * time.Second)
}
if err != nil {
- return nil, errors.Wrapf(err, "couldn't fetch token - maybe it has expired? - refresh with \"rclone config reconnect %s:\"", ts.name)
+ return nil, fmt.Errorf("couldn't fetch token - maybe it has expired? - refresh with \"rclone config reconnect %s:\": %w", ts.name, err)
}
changed = changed || (*token != *ts.token)
ts.token = token
@@ -256,7 +256,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
}
err = PutToken(ts.name, ts.m, token, false)
if err != nil {
- return nil, errors.Wrap(err, "couldn't store token")
+ return nil, fmt.Errorf("couldn't store token: %w", err)
}
}
return token, nil
@@ -442,7 +442,7 @@ func ConfigOAuth(ctx context.Context, name string, m configmap.Mapper, ri *fs.Re
}
opt, ok := out.OAuth.(*Options)
if !ok {
- return nil, errors.Errorf("internal error: oauth failed: wrong type in config: %T", out.OAuth)
+ return nil, fmt.Errorf("internal error: oauth failed: wrong type in config: %T", out.OAuth)
}
if opt.OAuth2Config == nil {
return nil, errors.New("internal error: oauth failed: OAuth2Config not set")
@@ -499,7 +499,7 @@ version recommended):
// Encode them into a string
mCopyString, err := inM.Encode()
if err != nil {
- return nil, errors.Wrap(err, "oauthutil authorize encode")
+ return nil, fmt.Errorf("oauthutil authorize encode: %w", err)
}
// Write what the user has to do
if len(mCopyString) > 0 {
@@ -547,7 +547,7 @@ version recommended):
oauthConfig = fixRedirect(oauthConfig)
code, err = configSetup(ctx, ri.Name, name, m, oauthConfig, opt)
if err != nil {
- return nil, errors.Wrap(err, "config failed to refresh token")
+ return nil, fmt.Errorf("config failed to refresh token: %w", err)
}
}
err = configExchange(ctx, name, m, oauthConfig, code)
@@ -560,7 +560,7 @@ version recommended):
_, returnState := fs.StatePop(stateParams)
return fs.ConfigGoto(returnState)
}
- return nil, errors.Errorf("unknown internal oauth state %q", state)
+ return nil, fmt.Errorf("unknown internal oauth state %q", state)
}
func init() {
@@ -626,7 +626,7 @@ func configSetup(ctx context.Context, id, name string, m configmap.Mapper, oauth
server := newAuthServer(opt, bindAddress, state, authURL)
err = server.Init()
if err != nil {
- return "", errors.Wrap(err, "failed to start auth webserver")
+ return "", fmt.Errorf("failed to start auth webserver: %w", err)
}
go server.Serve()
defer server.Stop()
@@ -662,7 +662,7 @@ func configExchange(ctx context.Context, name string, m configmap.Mapper, oauthC
ctx = Context(ctx, fshttp.NewClient(ctx))
token, err := oauthConfig.Exchange(ctx, code)
if err != nil {
- return errors.Wrap(err, "failed to get token")
+ return fmt.Errorf("failed to get token: %w", err)
}
return PutToken(name, m, token, true)
}
diff --git a/lib/pacer/pacer.go b/lib/pacer/pacer.go
index dffa803f9..0257d70da 100644
--- a/lib/pacer/pacer.go
+++ b/lib/pacer/pacer.go
@@ -5,7 +5,7 @@ import (
"sync"
"time"
- "github.com/rclone/rclone/lib/errors"
+ liberrors "github.com/rclone/rclone/lib/errors"
)
// State represents the public Pacer state that will be passed to the
@@ -253,7 +253,7 @@ func RetryAfterError(err error, retryAfter time.Duration) error {
// IsRetryAfter returns true if the error or any of it's Cause's is an error
// returned by RetryAfterError. It also returns the associated Duration if possible.
func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
- errors.Walk(err, func(err error) bool {
+ liberrors.Walk(err, func(err error) bool {
if r, ok := err.(*retryAfterError); ok {
retryAfter, isRetryAfter = r.retryAfter, true
return true
diff --git a/lib/pacer/pacer_test.go b/lib/pacer/pacer_test.go
index 6159acd4e..11aebb4ab 100644
--- a/lib/pacer/pacer_test.go
+++ b/lib/pacer/pacer_test.go
@@ -1,11 +1,11 @@
package pacer
import (
+ "errors"
"sync"
"testing"
"time"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
diff --git a/lib/random/random.go b/lib/random/random.go
index 30198bdd4..08e752b84 100644
--- a/lib/random/random.go
+++ b/lib/random/random.go
@@ -5,9 +5,8 @@ import (
cryptorand "crypto/rand"
"encoding/base64"
"encoding/binary"
+ "fmt"
mathrand "math/rand"
-
- "github.com/pkg/errors"
)
// StringFn create a random string for test purposes using the random
@@ -53,10 +52,10 @@ func Password(bits int) (password string, err error) {
var pw = make([]byte, bytes)
n, err := cryptorand.Read(pw)
if err != nil {
- return "", errors.Wrap(err, "password read failed")
+ return "", fmt.Errorf("password read failed: %w", err)
}
if n != bytes {
- return "", errors.Errorf("password short read: %d", n)
+ return "", fmt.Errorf("password short read: %d", n)
}
password = base64.RawURLEncoding.EncodeToString(pw)
return password, nil
@@ -72,7 +71,7 @@ func Seed() error {
var seed int64
err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed)
if err != nil {
- return errors.Wrap(err, "failed to read random seed")
+ return fmt.Errorf("failed to read random seed: %w", err)
}
mathrand.Seed(seed)
return nil
diff --git a/lib/readers/error_test.go b/lib/readers/error_test.go
index 0231618b9..e8c73f980 100644
--- a/lib/readers/error_test.go
+++ b/lib/readers/error_test.go
@@ -1,9 +1,9 @@
package readers
import (
+ "errors"
"testing"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
diff --git a/lib/readers/noclose_test.go b/lib/readers/noclose_test.go
index da784425c..e954d9c72 100644
--- a/lib/readers/noclose_test.go
+++ b/lib/readers/noclose_test.go
@@ -1,10 +1,10 @@
package readers
import (
+ "errors"
"io"
"testing"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
diff --git a/lib/readers/pattern_reader.go b/lib/readers/pattern_reader.go
index a480dc185..edceef24d 100644
--- a/lib/readers/pattern_reader.go
+++ b/lib/readers/pattern_reader.go
@@ -1,9 +1,8 @@
package readers
import (
+ "errors"
"io"
-
- "github.com/pkg/errors"
)
// This is the smallest prime less than 256
diff --git a/lib/readers/repeatable.go b/lib/readers/repeatable.go
index e61b083d8..73de3fe90 100644
--- a/lib/readers/repeatable.go
+++ b/lib/readers/repeatable.go
@@ -1,10 +1,9 @@
package readers
import (
+ "errors"
"io"
"sync"
-
- "github.com/pkg/errors"
)
// A RepeatableReader implements the io.ReadSeeker it allow to seek cached data
diff --git a/lib/rest/rest.go b/lib/rest/rest.go
index 4490d617a..e534b0d1e 100644
--- a/lib/rest/rest.go
+++ b/lib/rest/rest.go
@@ -8,6 +8,8 @@ import (
"context"
"encoding/json"
"encoding/xml"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
"mime/multipart"
@@ -15,7 +17,6 @@ import (
"net/url"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
)
@@ -51,9 +52,9 @@ func ReadBody(resp *http.Response) (result []byte, err error) {
func defaultErrorHandler(resp *http.Response) (err error) {
body, err := ReadBody(resp)
if err != nil {
- return errors.Wrap(err, "error reading error out of body")
+ return fmt.Errorf("error reading error out of body: %w", err)
}
- return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+ return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
// SetErrorHandler sets the handler to decode an error response when
@@ -272,7 +273,7 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
err = api.signer(req)
api.mu.RLock()
if err != nil {
- return nil, errors.Wrap(err, "signer failed")
+ return nil, fmt.Errorf("signer failed: %w", err)
}
}
api.mu.RUnlock()
@@ -286,7 +287,7 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
err = api.errorHandler(resp)
if err.Error() == "" {
// replace empty errors with something
- err = errors.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
+ err = fmt.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
}
return resp, err
}
@@ -364,7 +365,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
for _, val := range vals {
err = writer.WriteField(key, val)
if err != nil {
- _ = bodyWriter.CloseWithError(errors.Wrap(err, "create metadata part"))
+ _ = bodyWriter.CloseWithError(fmt.Errorf("create metadata part: %w", err))
return
}
}
@@ -373,20 +374,20 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
if in != nil {
part, err := writer.CreateFormFile(contentName, fileName)
if err != nil {
- _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to create form file"))
+ _ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
}
_, err = io.Copy(part, in)
if err != nil {
- _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to copy data"))
+ _ = bodyWriter.CloseWithError(fmt.Errorf("failed to copy data: %w", err))
return
}
}
err = writer.Close()
if err != nil {
- _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to close form"))
+ _ = bodyWriter.CloseWithError(fmt.Errorf("failed to close form: %w", err))
return
}
diff --git a/lib/rest/url.go b/lib/rest/url.go
index 07ce15958..4a1d71390 100644
--- a/lib/rest/url.go
+++ b/lib/rest/url.go
@@ -1,9 +1,8 @@
package rest
import (
+ "fmt"
"net/url"
-
- "github.com/pkg/errors"
)
// URLJoin joins a URL and a path returning a new URL
@@ -12,7 +11,7 @@ import (
func URLJoin(base *url.URL, path string) (*url.URL, error) {
rel, err := url.Parse(path)
if err != nil {
- return nil, errors.Wrapf(err, "Error parsing %q as URL", path)
+ return nil, fmt.Errorf("Error parsing %q as URL: %w", path, err)
}
return base.ResolveReference(rel), nil
}
diff --git a/librclone/librclone/librclone.go b/librclone/librclone/librclone.go
index dabb02f0d..21c5aa5d5 100644
--- a/librclone/librclone/librclone.go
+++ b/librclone/librclone/librclone.go
@@ -15,7 +15,6 @@ import (
"runtime/debug"
"strings"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configfile"
@@ -91,23 +90,23 @@ func RPC(method string, input string) (output string, status int) {
// create a buffer to capture the output
err := json.NewDecoder(strings.NewReader(input)).Decode(&in)
if err != nil {
- return writeError(method, in, errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
+ return writeError(method, in, fmt.Errorf("failed to read input JSON: %w", err), http.StatusBadRequest)
}
// Find the call
call := rc.Calls.Get(method)
if call == nil {
- return writeError(method, in, errors.Errorf("couldn't find method %q", method), http.StatusNotFound)
+ return writeError(method, in, fmt.Errorf("couldn't find method %q", method), http.StatusNotFound)
}
// TODO: handle these cases
if call.NeedsRequest {
- return writeError(method, in, errors.Errorf("method %q needs request, not supported", method), http.StatusNotFound)
+ return writeError(method, in, fmt.Errorf("method %q needs request, not supported", method), http.StatusNotFound)
// Add the request to RC
//in["_request"] = r
}
if call.NeedsResponse {
- return writeError(method, in, errors.Errorf("method %q need response, not supported", method), http.StatusNotFound)
+ return writeError(method, in, fmt.Errorf("method %q need response, not supported", method), http.StatusNotFound)
//in["_response"] = w
}
diff --git a/vfs/dir.go b/vfs/dir.go
index 755fde807..2b2eda5a2 100644
--- a/vfs/dir.go
+++ b/vfs/dir.go
@@ -11,7 +11,6 @@ import (
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/list"
@@ -651,7 +650,7 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
}
}
default:
- err = errors.Errorf("unknown type %T", item)
+ err = fmt.Errorf("unknown type %T", item)
fs.Errorf(d, "readDir error: %v", err)
return err
}
@@ -712,7 +711,7 @@ func (d *Dir) stat(leaf string) (Node, error) {
if strings.ToLower(name) == leafLower {
if ok {
// duplicate case insensitive match is an error
- return nil, errors.Errorf("duplicate filename %q detected with --vfs-case-insensitive set", leaf)
+ return nil, fmt.Errorf("duplicate filename %q detected with --vfs-case-insensitive set", leaf)
}
// found a case insensitive match
ok = true
@@ -1003,14 +1002,14 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
return err
}
} else {
- err := errors.Errorf("Fs %q can't rename file that is not a vfs.File", d.f)
+ err := fmt.Errorf("Fs %q can't rename file that is not a vfs.File", d.f)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
case fs.Directory:
features := d.f.Features()
if features.DirMove == nil && features.Move == nil && features.Copy == nil {
- err := errors.Errorf("Fs %q can't rename directories (no DirMove, Move or Copy)", d.f)
+ err := fmt.Errorf("Fs %q can't rename directories (no DirMove, Move or Copy)", d.f)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
@@ -1030,7 +1029,7 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
}
}
default:
- err = errors.Errorf("unknown type %T", oldNode)
+ err = fmt.Errorf("unknown type %T", oldNode)
fs.Errorf(d.path, "Dir.Rename error: %v", err)
return err
}
diff --git a/vfs/file.go b/vfs/file.go
index d890b1f54..4c4fc214a 100644
--- a/vfs/file.go
+++ b/vfs/file.go
@@ -2,13 +2,14 @@ package vfs
import (
"context"
+ "errors"
+ "fmt"
"os"
"path"
"sync"
"sync/atomic"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
@@ -166,7 +167,7 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
f.mu.RUnlock()
if features := d.Fs().Features(); features.Move == nil && features.Copy == nil {
- err := errors.Errorf("Fs %q can't rename files (no server-side Move or Copy)", d.Fs())
+ err := fmt.Errorf("Fs %q can't rename files (no server-side Move or Copy)", d.Fs())
fs.Errorf(f.Path(), "Dir.Rename error: %v", err)
return err
}
diff --git a/vfs/rc.go b/vfs/rc.go
index e6aa4bae4..3450773f0 100644
--- a/vfs/rc.go
+++ b/vfs/rc.go
@@ -2,12 +2,12 @@ package vfs
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/rc"
@@ -44,9 +44,9 @@ func getVFS(in rc.Params) (vfs *VFS, err error) {
fsString = cache.Canonicalize(fsString)
activeVFS := active[fsString]
if len(activeVFS) == 0 {
- return nil, errors.Errorf("no VFS found with name %q", fsString)
+ return nil, fmt.Errorf("no VFS found with name %q", fsString)
} else if len(activeVFS) > 1 {
- return nil, errors.Errorf("more than one VFS active with name %q", fsString)
+ return nil, fmt.Errorf("more than one VFS active with name %q", fsString)
}
delete(in, "fs") // delete the fs parameter
return activeVFS[0], nil
@@ -111,11 +111,11 @@ func rcRefresh(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if v, ok := in[k]; ok {
s, ok := v.(string)
if !ok {
- return out, errors.Errorf("value must be string %q=%v", k, v)
+ return out, fmt.Errorf("value must be string %q=%v", k, v)
}
recursive, err = strconv.ParseBool(s)
if err != nil {
- return out, errors.Errorf("invalid value %q=%v", k, v)
+ return out, fmt.Errorf("invalid value %q=%v", k, v)
}
delete(in, k)
}
@@ -137,7 +137,7 @@ func rcRefresh(ctx context.Context, in rc.Params) (out rc.Params, err error) {
for k, v := range in {
path, ok := v.(string)
if !ok {
- return out, errors.Errorf("value must be string %q=%v", k, v)
+ return out, fmt.Errorf("value must be string %q=%v", k, v)
}
if strings.HasPrefix(k, "dir") {
dir, err := getDir(path)
@@ -156,7 +156,7 @@ func rcRefresh(ctx context.Context, in rc.Params) (out rc.Params, err error) {
}
}
} else {
- return out, errors.Errorf("unknown key %q", k)
+ return out, fmt.Errorf("unknown key %q", k)
}
}
}
@@ -208,7 +208,7 @@ func rcForget(ctx context.Context, in rc.Params) (out rc.Params, err error) {
for k, v := range in {
path, ok := v.(string)
if !ok {
- return out, errors.Errorf("value must be string %q=%v", k, v)
+ return out, fmt.Errorf("value must be string %q=%v", k, v)
}
path = strings.Trim(path, "/")
if strings.HasPrefix(k, "file") {
@@ -216,7 +216,7 @@ func rcForget(ctx context.Context, in rc.Params) (out rc.Params, err error) {
} else if strings.HasPrefix(k, "dir") {
root.ForgetPath(path, fs.EntryDirectory)
} else {
- return out, errors.Errorf("unknown key %q", k)
+ return out, fmt.Errorf("unknown key %q", k)
}
forgotten = append(forgotten, path)
}
@@ -230,11 +230,11 @@ func rcForget(ctx context.Context, in rc.Params) (out rc.Params, err error) {
func getDuration(k string, v interface{}) (time.Duration, error) {
s, ok := v.(string)
if !ok {
- return 0, errors.Errorf("value must be string %q=%v", k, v)
+ return 0, fmt.Errorf("value must be string %q=%v", k, v)
}
interval, err := fs.ParseDuration(s)
if err != nil {
- return 0, errors.Wrap(err, "parse duration")
+ return 0, fmt.Errorf("parse duration: %w", err)
}
return interval, nil
}
@@ -272,7 +272,7 @@ func getTimeout(in rc.Params) (time.Duration, error) {
func getStatus(vfs *VFS, in rc.Params) (out rc.Params, err error) {
for k, v := range in {
- return nil, errors.Errorf("invalid parameter: %s=%s", k, v)
+ return nil, fmt.Errorf("invalid parameter: %s=%s", k, v)
}
return rc.Params{
"enabled": vfs.Opt.PollInterval != 0,
@@ -329,7 +329,7 @@ func rcPollInterval(ctx context.Context, in rc.Params) (out rc.Params, err error
return nil, err
}
for k, v := range in {
- return nil, errors.Errorf("invalid parameter: %s=%s", k, v)
+ return nil, fmt.Errorf("invalid parameter: %s=%s", k, v)
}
if vfs.pollChan == nil {
return nil, errors.New("poll-interval is not supported by this remote")
diff --git a/vfs/read.go b/vfs/read.go
index 1dae5a8d8..a128b0313 100644
--- a/vfs/read.go
+++ b/vfs/read.go
@@ -2,12 +2,13 @@ package vfs
import (
"context"
+ "errors"
+ "fmt"
"io"
"os"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
@@ -353,7 +354,7 @@ func (fh *ReadFileHandle) checkHash() error {
for hashType, dstSum := range fh.hash.Sums() {
srcSum, err := o.Hash(context.TODO(), hashType)
if err != nil {
- if os.IsNotExist(errors.Cause(err)) {
+ if errors.Is(err, os.ErrNotExist) {
// if it was file not found then at
// this point we don't care any more
continue
@@ -361,7 +362,7 @@ func (fh *ReadFileHandle) checkHash() error {
return err
}
if !hash.Equals(dstSum, srcSum) {
- return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
+ return fmt.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
}
}
diff --git a/vfs/read_write.go b/vfs/read_write.go
index 50697ce3e..a66ea051f 100644
--- a/vfs/read_write.go
+++ b/vfs/read_write.go
@@ -6,7 +6,6 @@ import (
"os"
"sync"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs/vfscache"
@@ -54,7 +53,7 @@ func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
if !fh.readOnly() && (fh.flags&os.O_TRUNC != 0 || (fh.flags&os.O_CREATE != 0 && !exists)) {
err = fh.Truncate(0)
if err != nil {
- return nil, errors.Wrap(err, "cache open with O_TRUNC: failed to truncate")
+ return nil, fmt.Errorf("cache open with O_TRUNC: failed to truncate: %w", err)
}
// we definitely need to write back the item even if we don't write to it
item.Dirty()
@@ -92,7 +91,7 @@ func (fh *RWFileHandle) openPending() (err error) {
o := fh.file.getObject()
err = fh.item.Open(o)
if err != nil {
- return errors.Wrap(err, "open RW handle failed to open cache file")
+ return fmt.Errorf("open RW handle failed to open cache file: %w", err)
}
size := fh._size() // update size in file and read size
diff --git a/vfs/read_write_test.go b/vfs/read_write_test.go
index f7335087c..1f5211472 100644
--- a/vfs/read_write_test.go
+++ b/vfs/read_write_test.go
@@ -2,6 +2,7 @@ package vfs
import (
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -9,7 +10,6 @@ import (
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
@@ -398,7 +398,7 @@ func TestRWFileHandleWriteNoWrite(t *testing.T) {
// Close the file without writing to it
err := fh.Close()
- if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
+ if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Logf("skipping test: %v", err)
return
}
diff --git a/vfs/vfs_test.go b/vfs/vfs_test.go
index 4ece14c4f..893eeecbe 100644
--- a/vfs/vfs_test.go
+++ b/vfs/vfs_test.go
@@ -4,13 +4,13 @@ package vfs
import (
"context"
+ "errors"
"fmt"
"io"
"os"
"testing"
"time"
- "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/all" // import all the backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
@@ -283,7 +283,7 @@ func TestVFSOpenFile(t *testing.T) {
require.NoError(t, err)
assert.NotNil(t, fd)
err = fd.Close()
- if errors.Cause(err) != fs.ErrorCantUploadEmptyFiles {
+ if !errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
require.NoError(t, err)
}
diff --git a/vfs/vfscache/cache.go b/vfs/vfscache/cache.go
index 483bdf39c..88d1f2f24 100644
--- a/vfs/vfscache/cache.go
+++ b/vfs/vfscache/cache.go
@@ -3,6 +3,7 @@ package vfscache
import (
"context"
+ "errors"
"fmt"
"os"
"path"
@@ -14,7 +15,6 @@ import (
"time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
fscache "github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
@@ -130,7 +130,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVir
// load in the cache and metadata off disk
err = c.reload(ctx)
if err != nil {
- return nil, errors.Wrap(err, "failed to load cache")
+ return nil, fmt.Errorf("failed to load cache: %w", err)
}
// Remove any empty directories
@@ -160,9 +160,9 @@ func createRootDir(parentOSPath string, name string, relativeDirOSPath string) (
// createRootDirs creates all cache root directories
func createRootDirs(parentOSPath string, relativeDirOSPath string) (dataOSPath string, metaOSPath string, err error) {
if dataOSPath, err = createRootDir(parentOSPath, "vfs", relativeDirOSPath); err != nil {
- err = errors.Wrap(err, "failed to create data cache directory")
+ err = fmt.Errorf("failed to create data cache directory: %w", err)
} else if metaOSPath, err = createRootDir(parentOSPath, "vfsMeta", relativeDirOSPath); err != nil {
- err = errors.Wrap(err, "failed to create metadata cache directory")
+ err = fmt.Errorf("failed to create metadata cache directory: %w", err)
}
return
}
@@ -176,12 +176,12 @@ func (c *Cache) createItemDir(name string) (string, error) {
parentPath := c.toOSPath(parent)
err := createDir(parentPath)
if err != nil {
- return "", errors.Wrap(err, "failed to create data cache item directory")
+ return "", fmt.Errorf("failed to create data cache item directory: %w", err)
}
parentPathMeta := c.toOSPathMeta(parent)
err = createDir(parentPathMeta)
if err != nil {
- return "", errors.Wrap(err, "failed to create metadata cache item directory")
+ return "", fmt.Errorf("failed to create metadata cache item directory: %w", err)
}
return filepath.Join(parentPath, leaf), nil
}
@@ -195,9 +195,9 @@ func getBackend(ctx context.Context, parentPath string, name string, relativeDir
// getBackends gets backends for all cache root dirs
func getBackends(ctx context.Context, parentPath string, relativeDirPath string) (fdata fs.Fs, fmeta fs.Fs, err error) {
if fdata, err = getBackend(ctx, parentPath, "vfs", relativeDirPath); err != nil {
- err = errors.Wrap(err, "failed to get data cache backend")
+ err = fmt.Errorf("failed to get data cache backend: %w", err)
} else if fmeta, err = getBackend(ctx, parentPath, "vfsMeta", relativeDirPath); err != nil {
- err = errors.Wrap(err, "failed to get metadata cache backend")
+ err = fmt.Errorf("failed to get metadata cache backend: %w", err)
}
return
}
@@ -342,32 +342,32 @@ func rename(osOldPath, osNewPath string) error {
if os.IsNotExist(err) {
return nil
}
- return errors.Wrapf(err, "Failed to stat source: %s", osOldPath)
+ return fmt.Errorf("Failed to stat source: %s: %w", osOldPath, err)
}
if !sfi.Mode().IsRegular() {
// cannot copy non-regular files (e.g., directories, symlinks, devices, etc.)
- return errors.Errorf("Non-regular source file: %s (%q)", sfi.Name(), sfi.Mode().String())
+ return fmt.Errorf("Non-regular source file: %s (%q)", sfi.Name(), sfi.Mode().String())
}
dfi, err := os.Stat(osNewPath)
if err != nil {
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "Failed to stat destination: %s", osNewPath)
+ return fmt.Errorf("Failed to stat destination: %s: %w", osNewPath, err)
}
parent := vfscommon.OsFindParent(osNewPath)
err = createDir(parent)
if err != nil {
- return errors.Wrapf(err, "Failed to create parent dir: %s", parent)
+ return fmt.Errorf("Failed to create parent dir: %s: %w", parent, err)
}
} else {
if !(dfi.Mode().IsRegular()) {
- return errors.Errorf("Non-regular destination file: %s (%q)", dfi.Name(), dfi.Mode().String())
+ return fmt.Errorf("Non-regular destination file: %s (%q)", dfi.Name(), dfi.Mode().String())
}
if os.SameFile(sfi, dfi) {
return nil
}
}
if err = os.Rename(osOldPath, osNewPath); err != nil {
- return errors.Wrapf(err, "Failed to rename in cache: %s to %s", osOldPath, osNewPath)
+ return fmt.Errorf("Failed to rename in cache: %s to %s: %w", osOldPath, osNewPath, err)
}
return nil
}
@@ -478,7 +478,7 @@ func (c *Cache) walk(dir string, fn func(osPath string, fi os.FileInfo, name str
// Find path relative to the cache root
name, err := filepath.Rel(dir, osPath)
if err != nil {
- return errors.Wrap(err, "filepath.Rel failed in walk")
+ return fmt.Errorf("filepath.Rel failed in walk: %w", err)
}
if name == "." {
name = ""
@@ -511,7 +511,7 @@ func (c *Cache) reload(ctx context.Context) error {
return nil
})
if err != nil {
- return errors.Wrapf(err, "failed to walk cache %q", dir)
+ return fmt.Errorf("failed to walk cache %q: %w", dir, err)
}
}
return nil
diff --git a/vfs/vfscache/downloaders/downloaders.go b/vfs/vfscache/downloaders/downloaders.go
index 94c4133d8..c6d769a4f 100644
--- a/vfs/vfscache/downloaders/downloaders.go
+++ b/vfs/vfscache/downloaders/downloaders.go
@@ -2,10 +2,11 @@ package downloaders
import (
"context"
+ "errors"
+ "fmt"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/asyncreader"
@@ -182,7 +183,7 @@ func (dls *Downloaders) _newDownloader(r ranges.Range) (dl *downloader, err erro
err = dl.open(dl.offset)
if err != nil {
_ = dl.close(err)
- return nil, errors.Wrap(err, "failed to open downloader")
+ return nil, fmt.Errorf("failed to open downloader: %w", err)
}
dls.dls = append(dls.dls, dl)
@@ -361,7 +362,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
dl, err = dls._newDownloader(r)
if err != nil {
dls._countErrors(0, err)
- return errors.Wrap(err, "failed to start downloader")
+ return fmt.Errorf("failed to start downloader: %w", err)
}
return err
}
@@ -534,7 +535,7 @@ func (dl *downloader) open(offset int64) (err error) {
in0 := chunkedreader.New(context.TODO(), dl.dls.src, int64(dl.dls.opt.ChunkSize), int64(dl.dls.opt.ChunkSizeLimit))
_, err = in0.Seek(offset, 0)
if err != nil {
- return errors.Wrap(err, "vfs reader: failed to open source file")
+ return fmt.Errorf("vfs reader: failed to open source file: %w", err)
}
dl.in = dl.tr.Account(dl.dls.ctx, in0).WithBuffer() // account and buffer the transfer
@@ -550,7 +551,7 @@ func (dl *downloader) open(offset int64) (err error) {
func (dl *downloader) close(inErr error) (err error) {
// defer log.Trace(dl.dls.src, "inErr=%v", err)("err=%v", &err)
checkErr := func(e error) {
- if e == nil || errors.Cause(err) == asyncreader.ErrorStreamAbandoned {
+ if e == nil || errors.Is(err, asyncreader.ErrorStreamAbandoned) {
return
}
err = e
@@ -617,8 +618,8 @@ func (dl *downloader) stopAndClose(inErr error) (err error) {
func (dl *downloader) download() (n int64, err error) {
// defer log.Trace(dl.dls.src, "")("err=%v", &err)
n, err = dl.in.WriteTo(dl)
- if err != nil && errors.Cause(err) != asyncreader.ErrorStreamAbandoned {
- return n, errors.Wrap(err, "vfs reader: failed to write to cache file")
+ if err != nil && !errors.Is(err, asyncreader.ErrorStreamAbandoned) {
+ return n, fmt.Errorf("vfs reader: failed to write to cache file: %w", err)
}
return n, nil
diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go
index b8974ff71..6e06bcd51 100644
--- a/vfs/vfscache/item.go
+++ b/vfs/vfscache/item.go
@@ -3,13 +3,13 @@ package vfscache
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"os"
"sync"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
@@ -199,13 +199,13 @@ func (item *Item) load() (exists bool, err error) {
if os.IsNotExist(err) {
return false, err
}
- return true, errors.Wrap(err, "vfs cache item: failed to read metadata")
+ return true, fmt.Errorf("vfs cache item: failed to read metadata: %w", err)
}
defer fs.CheckClose(in, &err)
decoder := json.NewDecoder(in)
err = decoder.Decode(&item.info)
if err != nil {
- return true, errors.Wrap(err, "vfs cache item: corrupt metadata")
+ return true, fmt.Errorf("vfs cache item: corrupt metadata: %w", err)
}
return true, nil
}
@@ -217,14 +217,14 @@ func (item *Item) _save() (err error) {
osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
out, err := os.Create(osPathMeta)
if err != nil {
- return errors.Wrap(err, "vfs cache item: failed to write metadata")
+ return fmt.Errorf("vfs cache item: failed to write metadata: %w", err)
}
defer fs.CheckClose(out, &err)
encoder := json.NewEncoder(out)
encoder.SetIndent("", "\t")
err = encoder.Encode(item.info)
if err != nil {
- return errors.Wrap(err, "vfs cache item: failed to encode metadata")
+ return fmt.Errorf("vfs cache item: failed to encode metadata: %w", err)
}
return nil
}
@@ -261,7 +261,7 @@ func (item *Item) _truncate(size int64) (err error) {
fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
}
if err != nil {
- return errors.Wrap(err, "vfs cache: truncate: failed to open cache file")
+ return fmt.Errorf("vfs cache: truncate: failed to open cache file: %w", err)
}
defer fs.CheckClose(fd, &err)
@@ -276,7 +276,7 @@ func (item *Item) _truncate(size int64) (err error) {
err = fd.Truncate(size)
if err != nil {
- return errors.Wrap(err, "vfs cache: truncate")
+ return fmt.Errorf("vfs cache: truncate: %w", err)
}
item.info.Size = size
@@ -291,8 +291,8 @@ func (item *Item) _truncate(size int64) (err error) {
// call with the lock held
func (item *Item) _truncateToCurrentSize() (err error) {
size, err := item._getSize()
- if err != nil && !os.IsNotExist(errors.Cause(err)) {
- return errors.Wrap(err, "truncate to current size")
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("truncate to current size: %w", err)
}
if size < 0 {
// FIXME ignore unknown length files
@@ -326,8 +326,8 @@ func (item *Item) Truncate(size int64) (err error) {
// Read old size
oldSize, err := item._getSize()
if err != nil {
- if !os.IsNotExist(errors.Cause(err)) {
- return errors.Wrap(err, "truncate failed to read size")
+ if !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("truncate failed to read size: %w", err)
}
oldSize = 0
}
@@ -462,7 +462,7 @@ func (item *Item) _createFile(osPath string) (err error) {
item.modified = false
fd, err := file.OpenFile(osPath, os.O_RDWR, 0600)
if err != nil {
- return errors.Wrap(err, "vfs cache item: open failed")
+ return fmt.Errorf("vfs cache item: open failed: %w", err)
}
err = file.SetSparse(fd)
if err != nil {
@@ -477,7 +477,7 @@ func (item *Item) _createFile(osPath string) (err error) {
fs.Errorf(item.name, "vfs cache: item.fd.Close: closeErr: %v", err)
}
item.fd = nil
- return errors.Wrap(err, "vfs cache item: _save failed")
+ return fmt.Errorf("vfs cache item: _save failed: %w", err)
}
return err
}
@@ -513,12 +513,12 @@ func (item *Item) open(o fs.Object) (err error) {
osPath, err := item.c.createItemDir(item.name) // No locking in Cache
if err != nil {
- return errors.Wrap(err, "vfs cache item: createItemDir failed")
+ return fmt.Errorf("vfs cache item: createItemDir failed: %w", err)
}
err = item._checkObject(o)
if err != nil {
- return errors.Wrap(err, "vfs cache item: check object failed")
+ return fmt.Errorf("vfs cache item: check object failed: %w", err)
}
item.opens++
@@ -531,7 +531,7 @@ func (item *Item) open(o fs.Object) (err error) {
item._remove("item.open failed on _createFile, remove cache data/metadata files")
item.fd = nil
item.opens--
- return errors.Wrap(err, "vfs cache item: create cache file failed")
+ return fmt.Errorf("vfs cache item: create cache file failed: %w", err)
}
// Unlock the Item.mu so we can call some methods which take Cache.mu
item.mu.Unlock()
@@ -548,7 +548,7 @@ func (item *Item) open(o fs.Object) (err error) {
if oldItem.opens != 0 {
// Put the item back and return an error
item.c.put(item.name, oldItem) // LOCKING in Cache method
- err = errors.Errorf("internal error: item %q already open in the cache", item.name)
+ err = fmt.Errorf("internal error: item %q already open in the cache", item.name)
}
oldItem.mu.Unlock()
}
@@ -574,7 +574,7 @@ func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
// Transfer the temp file to the remote
cacheObj, err := item.c.fcache.NewObject(ctx, item.name)
if err != nil && err != fs.ErrorObjectNotFound {
- return errors.Wrap(err, "vfs cache: failed to find cache file")
+ return fmt.Errorf("vfs cache: failed to find cache file: %w", err)
}
// Object has disappeared if cacheObj == nil
@@ -584,7 +584,7 @@ func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
o, err := operations.Copy(ctx, item.c.fremote, o, name, cacheObj)
item.mu.Lock()
if err != nil {
- return errors.Wrap(err, "vfs cache: failed to transfer file from cache to remote")
+ return fmt.Errorf("vfs cache: failed to transfer file from cache to remote: %w", err)
}
item.o = o
item._updateFingerprint()
@@ -648,7 +648,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) {
if item.info.Dirty && item.o != nil {
err = item._ensure(0, item.info.Size)
if err != nil {
- return errors.Wrap(err, "vfs cache: failed to download missing parts of cache file")
+ return fmt.Errorf("vfs cache: failed to download missing parts of cache file: %w", err)
}
}
@@ -751,11 +751,11 @@ func (item *Item) reload(ctx context.Context) error {
// put the file into the directory listings
size, err := item._getSize()
if err != nil {
- return errors.Wrap(err, "reload: failed to read size")
+ return fmt.Errorf("reload: failed to read size: %w", err)
}
err = item.c.AddVirtual(item.name, size, false)
if err != nil {
- return errors.Wrap(err, "reload: failed to add virtual dir entry")
+ return fmt.Errorf("reload: failed to add virtual dir entry: %w", err)
}
return nil
}
@@ -807,7 +807,7 @@ func (item *Item) _checkObject(o fs.Object) error {
err := item._truncateToCurrentSize()
if err != nil {
- return errors.Wrap(err, "vfs cache item: open truncate failed")
+ return fmt.Errorf("vfs cache item: open truncate failed: %w", err)
}
return nil
@@ -1265,7 +1265,7 @@ func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
// Do the writing with Item.mu unlocked
n, err = item.fd.WriteAt(b, off)
if err == nil && n != len(b) {
- err = errors.Errorf("short write: tried to write %d but only %d written", len(b), n)
+ err = fmt.Errorf("short write: tried to write %d but only %d written", len(b), n)
}
item.mu.Lock()
item._written(off, int64(n))
@@ -1327,7 +1327,7 @@ func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, e
// fs.Debugf(item.name, "write chunk offset=%d size=%d", off, size)
nn, err = item.fd.WriteAt(b[:size], off)
if err == nil && nn != size {
- err = errors.Errorf("downloader: short write: tried to write %d but only %d written", size, nn)
+ err = fmt.Errorf("downloader: short write: tried to write %d but only %d written", size, nn)
}
item._written(off, int64(nn))
}
@@ -1356,11 +1356,11 @@ func (item *Item) Sync() (err error) {
// sync the file and the metadata to disk
err = item.fd.Sync()
if err != nil {
- return errors.Wrap(err, "vfs cache item sync: failed to sync file")
+ return fmt.Errorf("vfs cache item sync: failed to sync file: %w", err)
}
err = item._save()
if err != nil {
- return errors.Wrap(err, "vfs cache item sync: failed to sync metadata")
+ return fmt.Errorf("vfs cache item sync: failed to sync metadata: %w", err)
}
return nil
}
diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go
index 5db10c603..5f63f5461 100644
--- a/vfs/vfscache/writeback/writeback.go
+++ b/vfs/vfscache/writeback/writeback.go
@@ -5,12 +5,12 @@ package writeback
import (
"container/heap"
"context"
+ "errors"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/vfs/vfscommon"
)
@@ -365,7 +365,7 @@ func (wb *WriteBack) upload(ctx context.Context, wbItem *writeBackItem) {
if wbItem.delay > maxUploadDelay {
wbItem.delay = maxUploadDelay
}
- if _, uerr := fserrors.Cause(err); uerr == context.Canceled {
+ if errors.Is(err, context.Canceled) {
fs.Infof(wbItem.name, "vfs cache: upload canceled")
// Upload was cancelled so reset timer
wbItem.delay = wb.opt.WriteBack
diff --git a/vfs/vfscache/writeback/writeback_test.go b/vfs/vfscache/writeback/writeback_test.go
index 6b0398b50..512d0eeb7 100644
--- a/vfs/vfscache/writeback/writeback_test.go
+++ b/vfs/vfscache/writeback/writeback_test.go
@@ -3,13 +3,13 @@ package writeback
import (
"container/heap"
"context"
+ "errors"
"fmt"
"strings"
"sync"
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
diff --git a/vfs/vfscommon/cachemode.go b/vfs/vfscommon/cachemode.go
index 38b6fec99..a7f35fe9c 100644
--- a/vfs/vfscommon/cachemode.go
+++ b/vfs/vfscommon/cachemode.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/lib/errors"
)
// CacheMode controls the functionality of the cache
@@ -41,7 +40,7 @@ func (l *CacheMode) Set(s string) error {
return nil
}
}
- return errors.Errorf("Unknown cache mode level %q", s)
+ return fmt.Errorf("Unknown cache mode level %q", s)
}
// Type of the value
@@ -53,7 +52,7 @@ func (l *CacheMode) Type() string {
func (l *CacheMode) UnmarshalJSON(in []byte) error {
return fs.UnmarshalJSONFlag(in, l, func(i int64) error {
if i < 0 || i >= int64(len(cacheModeToString)) {
- return errors.Errorf("Unknown cache mode level %d", i)
+ return fmt.Errorf("Unknown cache mode level %d", i)
}
*l = CacheMode(i)
return nil
diff --git a/vfs/vfsflags/filemode.go b/vfs/vfsflags/filemode.go
index 7d8eb028b..50a8bc423 100644
--- a/vfs/vfsflags/filemode.go
+++ b/vfs/vfsflags/filemode.go
@@ -4,8 +4,6 @@ import (
"fmt"
"os"
"strconv"
-
- "github.com/pkg/errors"
)
// FileMode is a command line friendly os.FileMode
@@ -22,7 +20,7 @@ func (x *FileMode) String() string {
func (x *FileMode) Set(s string) error {
i, err := strconv.ParseInt(s, 8, 64)
if err != nil {
- return errors.Wrap(err, "Bad FileMode - must be octal digits")
+ return fmt.Errorf("Bad FileMode - must be octal digits: %w", err)
}
*x.Mode = (os.FileMode)(i)
return nil
diff --git a/vfs/write_test.go b/vfs/write_test.go
index 431ae084e..99f5d63b2 100644
--- a/vfs/write_test.go
+++ b/vfs/write_test.go
@@ -2,13 +2,13 @@ package vfs
import (
"context"
+ "errors"
"io"
"os"
"sync"
"testing"
"time"
- "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
@@ -117,7 +117,7 @@ func TestWriteFileHandleMethods(t *testing.T) {
h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
require.NoError(t, err)
err = h.Close()
- if errors.Cause(err) != fs.ErrorCantUploadEmptyFiles {
+ if !errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
assert.NoError(t, err)
checkListing(t, root, []string{"file1,0,false"})
}
@@ -215,7 +215,7 @@ func TestWriteFileHandleRelease(t *testing.T) {
// Check Release closes file
err := fh.Release()
- if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
+ if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Logf("skipping test: %v", err)
return
}
@@ -298,7 +298,7 @@ func testFileReadAt(t *testing.T, n int) {
// Close the file without writing to it if n==0
err := fh.Close()
- if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
+ if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Logf("skipping test: %v", err)
return
}