Merge branch 'master' into webdav-tus

This commit is contained in:
Christian Richter 2024-12-04 08:29:04 +00:00 committed by GitHub
commit 421f90307c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 81 additions and 28 deletions

View file

@ -106,9 +106,9 @@ func newOptions() []fs.Option {
Sensitive: true, Sensitive: true,
}, { }, {
Name: "compartment", Name: "compartment",
Help: "Object storage compartment OCID", Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
Provider: "!no_auth", Provider: "!no_auth",
Required: true, Required: false,
Sensitive: true, Sensitive: true,
}, { }, {
Name: "region", Name: "region",

View file

@ -399,14 +399,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
if err != nil { if err != nil {
return nil, fmt.Errorf("open file: %w", err) return nil, fmt.Errorf("open file: %w", err)
} }
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
return nil, fmt.Errorf("close file: %w", err)
}
writer := &writerAt{ writer := &writerAt{
ctx: ctx, ctx: ctx,
client: client,
fs: f, fs: f,
size: size, size: size,
remote: remote, remote: remote,
fd: openResult.FileDescriptor,
fileID: openResult.Fileid, fileID: openResult.Fileid,
} }

View file

@ -18,21 +18,14 @@ import (
// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud. // writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct { type writerAt struct {
ctx context.Context ctx context.Context
client *rest.Client
fs *Fs fs *Fs
size int64 size int64
remote string remote string
fd int64
fileID int64 fileID int64
} }
// Close implements WriterAt.Close. // Close implements WriterAt.Close.
func (c *writerAt) Close() error { func (c *writerAt) Close() error {
// close fd
if _, err := c.fileClose(c.ctx); err != nil {
return fmt.Errorf("close fd: %w", err)
}
// Avoiding race conditions: Depending on the tcp connection, there might be // Avoiding race conditions: Depending on the tcp connection, there might be
// caching issues when checking the size immediately after write. // caching issues when checking the size immediately after write.
// Hence we try avoiding them by checking the resulting size on a different connection. // Hence we try avoiding them by checking the resulting size on a different connection.
@ -72,8 +65,18 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
inSHA1Bytes := sha1.Sum(buffer) inSHA1Bytes := sha1.Sum(buffer)
inSHA1 := hex.EncodeToString(inSHA1Bytes[:]) inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
client, err := c.fs.newSingleConnClient(c.ctx)
if err != nil {
return 0, fmt.Errorf("create client: %w", err)
}
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
if err != nil {
return 0, fmt.Errorf("open file: %w", err)
}
// get target hash // get target hash
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength)) outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -89,10 +92,15 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
} }
// upload buffer with offset if necessary // upload buffer with offset if necessary
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil { if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
return 0, err return 0, err
} }
// close fd
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
return contentLength, fmt.Errorf("close fd: %w", err)
}
return contentLength, nil return contentLength, nil
} }
@ -125,11 +133,40 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
return result, nil return result, nil
} }
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_open",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
opts.Parameters.Set("flags", "0x0002") // O_WRITE
result := &api.FileOpenResponse{}
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("open new file descriptor: %w", err)
}
return result, nil
}
// Call pcloud file_checksum, see [API Doc.] // Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html // [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func (c *writerAt) fileChecksum( func fileChecksum(
ctx context.Context, ctx context.Context,
offset, count int64, client *rest.Client,
pacer *fs.Pacer,
fd, offset, count int64,
) (*api.FileChecksumResponse, error) { ) (*api.FileChecksumResponse, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
@ -140,26 +177,29 @@ func (c *writerAt) fileChecksum(
"Connection": "keep-alive", "Connection": "keep-alive",
}, },
} }
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10)) opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10)) opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
opts.Parameters.Set("count", strconv.FormatInt(count, 10)) opts.Parameters.Set("count", strconv.FormatInt(count, 10))
result := &api.FileChecksumResponse{} result := &api.FileChecksumResponse{}
err := c.fs.pacer.CallNoRetry(func() (bool, error) { err := pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result) resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err) err = result.Error.Update(err)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err) return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
} }
return result, nil return result, nil
} }
// Call pcloud file_pwrite, see [API Doc.] // Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html // [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func (c *writerAt) filePWrite( func filePWrite(
ctx context.Context, ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
offset int64, offset int64,
buf []byte, buf []byte,
) (*api.FilePWriteResponse, error) { ) (*api.FilePWriteResponse, error) {
@ -176,24 +216,29 @@ func (c *writerAt) filePWrite(
"Connection": "keep-alive", "Connection": "keep-alive",
}, },
} }
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10)) opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10)) opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
result := &api.FilePWriteResponse{} result := &api.FilePWriteResponse{}
err := c.fs.pacer.CallNoRetry(func() (bool, error) { err := pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result) resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err) err = result.Error.Update(err)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err) return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
} }
return result, nil return result, nil
} }
// Call pcloud file_close, see [API Doc.] // Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html // [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) { func fileClose(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
) (*api.FileCloseResponse, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
Path: "/file_close", Path: "/file_close",
@ -201,11 +246,11 @@ func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Close: true, Close: true,
} }
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10)) opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
result := &api.FileCloseResponse{} result := &api.FileCloseResponse{}
err := c.fs.pacer.CallNoRetry(func() (bool, error) { err := pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result) resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err) err = result.Error.Update(err)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })

View file

@ -537,6 +537,13 @@ sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docke
``` ```
though this is rarely needed. though this is rarely needed.
If the plugin fails to work properly, and only as a last resort after you have tried diagnosing with the above methods, you can try clearing the state of the plugin. **Note that all existing rclone docker volumes will probably have to be recreated.** This might be needed because a reinstall doesn't clean up existing state files, to allow for easy restoration, as stated above.
```
docker plugin disable rclone # disable the plugin to ensure no interference
sudo rm /var/lib/docker-plugins/rclone/cache/docker-plugin.state # removing the plugin state
docker plugin enable rclone # re-enable the plugin afterward
```
## Caveats ## Caveats
Finally I'd like to mention a _caveat with updating volume settings_. Finally I'd like to mention a _caveat with updating volume settings_.