Compare commits

...
Sign in to create a new pull request.

5 commits

Author SHA1 Message Date
Nick Craig-Wood
89cd3a90e3 storj: fix uploading to the wrong object on Update with overridden remote name
In this commit we discovered a problem with objects being uploaded under
the incorrect object name. This commit adds an integration test for the
problem.

65b2e378e0 drive: fix incorrect remote after Update on object

This test was tripped by the Storj backend and this patch fixes the
problem.
2023-06-27 12:46:55 +01:00
Nick Craig-Wood
6d3f53d7bc storj: fix "uplink: too many requests" errors when uploading to the same file
Storj has a rate limit of 1 per second when uploading to the same
file.

This was being tripped by the integration tests.

This patch fixes it by detecting the error and sleeping for 1 second
before retrying.

See: https://github.com/storj/uplink/issues/149
2023-06-27 12:44:09 +01:00
Nick Craig-Wood
3c45f880d6 fstests: allow ObjectUpdate test to retry upload 2023-06-27 12:42:16 +01:00
Nick Craig-Wood
572516e301 webdav: add --webdav-auth-redirect flag to fix 401 unauthorized on redirect
Before this change, if the server returned a 302 redirect message when
opening a file rclone would do the redirect but drop the
Authorization: header. This is a sensible thing to do for security
reasons but breaks some setups.

This patch adds the --webdav-auth-redirect flag which makes it
preserve the auth just for this kind of request.

See: https://forum.rclone.org/t/webdav-401-unauthorized-when-server-redirects-to-another-domain/39292
2023-06-27 09:30:47 +01:00
Nick Craig-Wood
1e9613c186 rest: make auth preserving redirects an option 2023-06-27 09:22:10 +01:00
5 changed files with 78 additions and 14 deletions

View file

@ -528,7 +528,11 @@ func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err e
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
return f.put(ctx, in, src, src.Remote(), options...)
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size())
// Reject options we don't support.
for _, option := range options {
@ -539,7 +543,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
}
bucketName, bucketPath := f.absolute(src.Remote())
bucketName, bucketPath := f.absolute(remote)
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
@ -549,7 +553,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
if err != nil {
aerr := upload.Abort()
if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr)
}
}
}()
@ -574,7 +578,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err)
return nil, err
}
@ -589,11 +593,19 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
} else if errors.Is(err, uplink.ErrTooManyRequests) {
// Storj has a rate limit of 1 per second of uploading to the same file.
// This produces ErrTooManyRequests here, so we wait 1 second and retry.
//
// See: https://github.com/storj/uplink/issues/149
fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err)
time.Sleep(time.Second)
err = fserrors.RetryError(err)
}
return nil, err
}
return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
return newObjectFromUplink(f, remote, upload.Info()), nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate

View file

@ -176,9 +176,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
fs.Debugf(o, "cp input ./%s %+v", o.Remote(), options)
oNew, err := o.fs.Put(ctx, in, src, options...)
oNew, err := o.fs.put(ctx, in, src, o.Remote(), options...)
if err == nil {
*o = *(oNew.(*Object))

View file

@ -144,6 +144,23 @@ Set to 0 to disable chunked uploading.
`,
Advanced: true,
Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
}, {
Name: "auth_redirect",
Help: `Preserve authentication on redirect.
If the server redirects rclone to a new domain when it is trying to
read a file then normally rclone will drop the Authorization: header
from the request.
This is standard security practice to avoid sending your credentials
to an unknown webserver.
However this is desirable in some circumstances. If you are getting
an error like "401 Unauthorized" when rclone is attempting to read
files from the webdav server then you can try this option.
`,
Advanced: true,
Default: false,
}},
})
}
@ -160,6 +177,7 @@ type Options struct {
Headers fs.CommaSepList `config:"headers"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
AuthRedirect bool `config:"auth_redirect"`
}
// Fs represents a remote webdav
@ -1375,6 +1393,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
ExtraHeaders: map[string]string{
"Depth": "0",
},
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)

View file

@ -1516,19 +1516,21 @@ func Run(t *testing.T, opt *Opt) {
t.Run("ObjectUpdate", func(t *testing.T) {
skipIfNotOk(t)
contents := random.String(200)
buf := bytes.NewBufferString(contents)
hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash)
var h *hash.MultiHasher
file1.Size = int64(buf.Len())
file1.Size = int64(len(contents))
obj := findObject(ctx, t, f, file1.Path)
remoteBefore := obj.Remote()
obji := object.NewStaticObjectInfo(file1.Path+"-should-be-ignored.bin", file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
err := obj.Update(ctx, in, obji)
require.NoError(t, err)
retry(t, "Update object", func() error {
buf := bytes.NewBufferString(contents)
h = hash.NewMultiHasher()
in := io.TeeReader(buf, h)
return obj.Update(ctx, in, obji)
})
remoteAfter := obj.Remote()
assert.Equal(t, remoteBefore, remoteAfter, "Remote should not change")
file1.Hashes = hash.Sums()
file1.Hashes = h.Sums()
// check the object has been updated
file1.Check(t, obj, f.Precision())

View file

@ -150,6 +150,7 @@ type Opts struct {
Trailer *http.Header // set the request trailer
Close bool // set to close the connection after this transaction
NoRedirect bool // if this is set then the client won't follow redirects
AuthRedirect bool // if this is set then the client will redirect with Auth
}
// Copy creates a copy of the options
@ -210,6 +211,34 @@ func ClientWithNoRedirects(c *http.Client) *http.Client {
return &clientCopy
}
// ClientWithAuthRedirects makes a new http client which will re-apply Auth on redirects
func ClientWithAuthRedirects(c *http.Client) *http.Client {
clientCopy := *c
clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
} else if len(via) == 0 {
return nil
}
prevReq := via[len(via)-1]
resp := req.Response
if resp == nil {
return nil
}
// Look at previous response to see if it was a redirect and preserve auth if so
switch resp.StatusCode {
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther, http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
// Reapply Auth (if any) from previous request on redirect
auth := prevReq.Header.Get("Authorization")
if auth != "" {
req.Header.Add("Authorization", auth)
}
}
return nil
}
return &clientCopy
}
// Call makes the call and returns the http.Response
//
// if err == nil then resp.Body will need to be closed unless
@ -302,6 +331,8 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
var c *http.Client
if opts.NoRedirect {
c = ClientWithNoRedirects(api.c)
} else if opts.AuthRedirect {
c = ClientWithAuthRedirects(api.c)
} else {
c = api.c
}