@@ -10,7 +10,9 @@ package webdav
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/tls"
+	"encoding/hex"
 	"encoding/xml"
 	"fmt"
 	"io"
@@ -18,6 +20,7 @@ import (
 	"net/url"
 	"os/exec"
 	"path"
+	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -34,6 +37,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
@@ -113,6 +117,14 @@ func init() {
 			Name:     config.ConfigEncoding,
 			Help:     configEncodingHelp,
 			Advanced: true,
+		}, {
+			Name: "chunk_size",
+			Help: `Chunk size to use for uploading (Nextcloud only)
+
+Set to 0 to disable chunked uploading.
+`,
+			Advanced: true,
+			Default:  fs.SizeSuffix(0), // off by default
 		}},
 	})
 }
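Note on the new option: `chunk_size` is an `fs.SizeSuffix`, so users can supply human-readable values such as `10M` (rclone size suffixes are binary multiples, so `10M` is 10 MiB). A minimal sketch of how such a value parses, assuming only the public `fs` package; the value and variable names here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// fs.SizeSuffix parses rclone-style sizes; "10M" means 10 MiB.
	var cs fs.SizeSuffix
	if err := cs.Set("10M"); err != nil {
		panic(err)
	}
	fmt.Println(int64(cs)) // 10485760
}
```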
@@ -126,6 +138,7 @@ type Options struct {
 	BearerToken        string               `config:"bearer_token"`
 	BearerTokenCommand string               `config:"bearer_token_command"`
 	Enc                encoder.MultiEncoder `config:"encoding"`
+	ChunkSize          fs.SizeSuffix        `config:"chunk_size"`
 }
 
 // Fs represents a remote webdav
@@ -136,6 +149,7 @@ type Fs struct {
 	features    *fs.Features  // optional features
 	endpoint    *url.URL      // URL of the host
 	endpointURL string        // endpoint as a string
+	uploadURL   string        // upload URL for nextcloud chunked
 	srv         *rest.Client  // the connection to the one drive server
 	pacer       *fs.Pacer     // pacer for API calls
 	precision   time.Duration // mod time precision
@@ -146,6 +160,7 @@ type Fs struct {
 	hasMD5     bool       // set if can use owncloud style checksums for MD5
 	hasSHA1    bool       // set if can use owncloud style checksums for SHA1
 	ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
+	canChunk   bool       // set if nextcloud and chunk_size is set
 }
 
 // Object describes a webdav object
@@ -457,6 +472,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	return f, nil
 }
 
+// set the chunk size for testing
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	return
+}
+
 // sets the BearerToken up
 func (f *Fs) setBearerToken(token string) {
 	f.opt.BearerToken = token
@@ -500,6 +521,8 @@ func (f *Fs) fetchAndSetBearerToken() error {
 	return nil
 }
 
+var matchNextcloudURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)
+
 // setQuirks adjusts the Fs for the vendor passed in
 func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 	switch vendor {
@@ -513,6 +536,12 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 		f.precision = time.Second
 		f.useOCMtime = true
 		f.hasSHA1 = true
+		f.canChunk = true
+		if f.opt.ChunkSize != 0 && !matchNextcloudURL.MatchString(f.endpointURL) {
+			return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
+		}
+		f.uploadURL = strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
+		fs.Debugf(nil, "Using nextcloud upload URL %q", f.uploadURL)
 	case "sharepoint":
 		// To mount sharepoint, two Cookies are required
 		// They have to be set instead of BasicAuth
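A standalone sketch of what the nextcloud quirk checks and derives: only endpoints of the `/dav/files/USER` form are accepted when chunking is on, and the upload URL is the same path with `files` swapped for `uploads`. The sample URLs are hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var matchNextcloudURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)

func main() {
	for _, u := range []string{
		"https://cloud.example.com/remote.php/dav/files/alice/", // ok for chunking
		"https://cloud.example.com/remote.php/webdav/",          // rejected when chunk_size != 0
	} {
		ok := matchNextcloudURL.MatchString(u)
		fmt.Println(u, "chunkable:", ok)
		if ok {
			// .../dav/files/alice/ -> .../dav/uploads/alice/
			fmt.Println("upload URL:", strings.Replace(u, "/dav/files/", "/dav/uploads/", 1))
		}
	}
}
```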
@@ -956,7 +985,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, method string) (fs.Object, error) {
 	dstPath := f.filePath(remote)
 	err := f.mkParentDir(ctx, dstPath)
 	if err != nil {
-		return nil, errors.Wrap(err, "Copy mkParentDir failed")
+		return nil, errors.Wrap(err, "copy mkParentDir failed")
 	}
 	destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
 	if err != nil {
@@ -980,11 +1009,11 @@
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "Copy call failed")
+		return nil, errors.Wrap(err, "copy call failed")
 	}
 	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
-		return nil, errors.Wrap(err, "Copy NewObject failed")
+		return nil, errors.Wrap(err, "copy NewObject failed")
 	}
 	return dstObj, nil
 }
@@ -1047,18 +1076,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 		return fs.ErrorDirExists
 	}
 	if err != fs.ErrorDirNotFound {
-		return errors.Wrap(err, "DirMove dirExists dst failed")
+		return errors.Wrap(err, "dirMove dirExists dst failed")
 	}
 
 	// Make sure the parent directory exists
 	err = f.mkParentDir(ctx, dstPath)
 	if err != nil {
-		return errors.Wrap(err, "DirMove mkParentDir dst failed")
+		return errors.Wrap(err, "dirMove mkParentDir dst failed")
 	}
 
 	destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
 	if err != nil {
-		return errors.Wrap(err, "DirMove couldn't join URL")
+		return errors.Wrap(err, "dirMove couldn't join URL")
 	}
 
 	var resp *http.Response
@@ -1076,7 +1105,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return errors.Wrap(err, "DirMove MOVE call failed")
+		return errors.Wrap(err, "dirMove MOVE call failed")
 	}
 	return nil
 }
@@ -1259,39 +1288,67 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	err = o.fs.mkParentDir(ctx, o.filePath())
 	if err != nil {
-		return errors.Wrap(err, "Update mkParentDir failed")
+		return errors.Wrap(err, "update mkParentDir failed")
 	}
 
 	size := src.Size()
-	var resp *http.Response
-	opts := rest.Opts{
-		Method:        "PUT",
-		Path:          o.filePath(),
-		Body:          in,
-		NoResponse:    true,
-		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
-		ContentType:   fs.MimeType(ctx, src),
-		Options:       options,
+	if o.fs.canChunk && o.fs.opt.ChunkSize > 0 && size > int64(o.fs.opt.ChunkSize) {
+		err = o.updateChunked(ctx, in, src, options...)
+		if err != nil {
+			return err
+		}
+	} else {
+		contentType := fs.MimeType(ctx, src)
+		filePath := o.filePath()
+		extraHeaders := o.extraHeaders(ctx, src)
+		err = o.updateSimple(ctx, in, filePath, size, contentType, extraHeaders, o.fs.endpointURL, options...)
+		if err != nil {
+			return err
+		}
 	}
+
+	// read metadata from remote
+	o.hasMetaData = false
+	return o.readMetaData(ctx)
+}
+
+func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
+	extraHeaders := map[string]string{}
 	if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
-		opts.ExtraHeaders = map[string]string{}
 		if o.fs.useOCMtime {
-			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
+			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
 		}
 		// Set one upload checksum
 		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
 		// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
 		if o.fs.hasSHA1 {
 			if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
-				opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
+				extraHeaders["OC-Checksum"] = "SHA1:" + sha1
 			}
 		}
-		if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
+		if o.fs.hasMD5 && extraHeaders["OC-Checksum"] == "" {
 			if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
-				opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
+				extraHeaders["OC-Checksum"] = "MD5:" + md5
 			}
 		}
 	}
+	return extraHeaders
+}
+
+// Standard update
+func (o *Object) updateSimple(ctx context.Context, in io.Reader, filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
+	var resp *http.Response
+	opts := rest.Opts{
+		Method:        "PUT",
+		Path:          filePath,
+		Body:          in,
+		NoResponse:    true,
+		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
+		ContentType:   contentType,
+		Options:       options,
+		ExtraHeaders:  extraHeaders,
+		RootURL:       rootURL,
+	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(ctx, resp, err)
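The headers assembled by the new `extraHeaders` helper are plain strings, shared by the simple PUT and by the final MOVE of a chunked upload. A sketch of their shape, with made-up file contents and mod time:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"time"
)

func main() {
	// Hypothetical file: contents and mod time are placeholders.
	modTime := time.Date(2021, 6, 1, 12, 0, 0, 0, time.UTC)
	sum := sha1.Sum([]byte("example contents"))

	headers := map[string]string{
		// X-OC-Mtime carries the mod time as Unix seconds.
		"X-OC-Mtime": fmt.Sprintf("%d", modTime.Unix()),
		// One checksum only; SHA1 is preferred, MD5 is the fallback.
		"OC-Checksum": "SHA1:" + hex.EncodeToString(sum[:]),
	}
	fmt.Println(headers)
}
```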
@@ -1307,9 +1364,85 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 		_ = o.Remove(ctx)
 		return err
 	}
-	// read metadata from remote
-	o.hasMetaData = false
-	return o.readMetaData(ctx)
+	return nil
 }
 
+// Chunked update for Nextcloud (see
+// https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html)
+func (o *Object) updateChunked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	hasher := md5.New()
+	_, err = hasher.Write([]byte(o.filePath()))
+	if err != nil {
+		return errors.Wrap(err, "chunked upload couldn't hash URL")
+	}
+	uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
+	fs.Debugf(src, "Starting multipart upload to temp dir %q", uploadDir)
+
+	opts := rest.Opts{
+		Method:     "MKCOL",
+		Path:       uploadDir + "/",
+		NoResponse: true,
+		RootURL:    o.fs.uploadURL,
+	}
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err := o.fs.srv.Call(ctx, &opts)
+		return o.fs.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "making upload directory failed")
+	}
+	defer atexit.OnError(&err, func() {
+		// Try to abort the upload, but ignore the error.
+		fs.Debugf(src, "Cancelling chunked upload")
+		_ = o.fs.Purge(ctx, uploadDir)
+	})()
+
+	var (
+		size         = src.Size()
+		uploadedSize = int64(0)
+		partObj      = &Object{
+			fs: o.fs,
+		}
+	)
+	for uploadedSize < size {
+		// Upload chunk
+		contentLength := int64(partObj.fs.opt.ChunkSize)
+		if size-uploadedSize < contentLength {
+			contentLength = size - uploadedSize
+		}
+		partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, uploadedSize, uploadedSize+contentLength)
+		extraHeaders := map[string]string{}
+		err = partObj.updateSimple(ctx, io.LimitReader(in, int64(partObj.fs.opt.ChunkSize)), partObj.remote, contentLength, "", extraHeaders, o.fs.uploadURL, options...)
+		if err != nil {
+			return errors.Wrap(err, "uploading chunk failed")
+		}
+		uploadedSize += contentLength
+	}
+
+	// Finish
+	var resp *http.Response
+	opts = rest.Opts{
+		Method:     "MOVE",
+		Path:       o.fs.filePath(path.Join(uploadDir, ".file")),
+		NoResponse: true,
+		Options:    options,
+		RootURL:    o.fs.uploadURL,
+	}
+	destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
+	if err != nil {
+		return errors.Wrap(err, "finalize chunked upload couldn't join URL")
+	}
+	opts.ExtraHeaders = o.extraHeaders(ctx, src)
+	opts.ExtraHeaders["Destination"] = destinationURL.String()
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = o.fs.srv.Call(ctx, &opts)
+		return o.fs.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "finalize chunked upload failed")
+	}
+	return nil
+}
+
 // Remove an object
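For reference, the wire-level sequence `updateChunked` drives is: MKCOL a temporary upload collection, PUT each chunk under a name built from its byte range, then MOVE the server-assembled `.file` onto the destination. A self-contained net/http sketch of that sequence, assuming the Nextcloud chunking API as documented above; the host, user, directory name, and `chunkedUpload` helper are hypothetical, and authentication is omitted:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// chunkedUpload is a hypothetical, minimal client for the Nextcloud
// chunking protocol (not rclone's implementation).
func chunkedUpload(c *http.Client, uploadsURL, filesURL, name string, in io.Reader, size, chunkSize int64) error {
	dir := uploadsURL + "/rclone-chunked-upload-example"

	do := func(req *http.Request) error {
		resp, err := c.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 300 {
			return fmt.Errorf("%s %s: %s", req.Method, req.URL, resp.Status)
		}
		return nil
	}

	// 1. MKCOL creates the temporary upload collection.
	req, err := http.NewRequest("MKCOL", dir+"/", nil)
	if err != nil {
		return err
	}
	if err := do(req); err != nil {
		return err
	}

	// 2. PUT each chunk, named by its byte range (%015d-%015d).
	for off := int64(0); off < size; off += chunkSize {
		length := chunkSize
		if size-off < length {
			length = size - off
		}
		part := fmt.Sprintf("%s/%015d-%015d", dir, off, off+length)
		req, err := http.NewRequest("PUT", part, io.LimitReader(in, length))
		if err != nil {
			return err
		}
		req.ContentLength = length
		if err := do(req); err != nil {
			return err
		}
	}

	// 3. MOVE the server-assembled ".file" onto its destination.
	req, err = http.NewRequest("MOVE", dir+"/.file", nil)
	if err != nil {
		return err
	}
	req.Header.Set("Destination", filesURL+"/"+name)
	return do(req)
}

func main() {
	in := bytes.NewReader(make([]byte, 25))
	err := chunkedUpload(http.DefaultClient,
		"https://cloud.example.com/remote.php/dav/uploads/alice",
		"https://cloud.example.com/remote.php/dav/files/alice",
		"big.bin", in, 25, 10)
	fmt.Println("upload error:", err)
}
```

The diff funnels the same three steps through rclone's pacer and rest client, so retries, error mapping, and the abort-on-error Purge stay consistent with the rest of the backend.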