forked from TrueCloudLab/rclone
Compare commits
4 commits
master...fix-4704-s
b26d5da84e
c1bf3f3999
fd2c373af1
66c8d3bf2b
3 changed files with 49 additions and 9 deletions
backend/s3/s3.go

@@ -1190,8 +1190,7 @@ rclone does if you know the bucket exists already.
 		// - trailing / encoding
 		// so that AWS keys are always valid file names
 		Default: encoder.EncodeInvalidUtf8 |
-			encoder.EncodeSlash |
-			encoder.EncodeDot,
 	}, {
 		Name:    "memory_pool_flush_time",
 		Default: memoryPoolFlushTime,
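For context on the hunk above: EncodeDot is the lib/encoder flag that substitutes file names consisting solely of "." or ".." before they are sent to the remote, so dropping it from the S3 default lets those names reach S3 verbatim. A rough sketch of the difference, assuming upstream rclone's lib/encoder API (the MultiEncoder flag type and its Encode method) as it stood around the time of this branch:

```go
package main

import (
	"fmt"

	// Import path taken from upstream rclone; the fork's module path may differ.
	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// Old S3 default: invalid UTF-8, "/" and the "."/".." names are all encoded.
	oldDefault := encoder.EncodeInvalidUtf8 | encoder.EncodeSlash | encoder.EncodeDot
	// New default after this branch: "." and ".." are passed through verbatim.
	newDefault := encoder.EncodeInvalidUtf8 | encoder.EncodeSlash

	fmt.Println(oldDefault.Encode(".")) // substituted form of "."
	fmt.Println(newDefault.Encode(".")) // "."
}
```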
@@ -1387,7 +1386,8 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+	fs.Debugf(nil, "SPLIT %q %q", f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath))
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
 
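As a hypothetical walk-through of the changed line in Fs.split: the values of f.root and the relative path below are invented, and the helper only mirrors lib/bucket's Split (everything before the first slash is the bucket name, as the Split context further down in this diff shows).

```go
package main

import (
	"fmt"
	"strings"
)

// splitBucket mirrors lib/bucket.Split for illustration only:
// the bucket name is everything before the first "/".
func splitBucket(absPath string) (bucketName, bucketPath string) {
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	// Suppose f.root is "my-bucket/photos" and the caller asks for ".".
	// bucket.Join keeps the "." element, so Split still sees it:
	joined := "my-bucket/photos" + "/" + "."
	fmt.Println(splitBucket(joined)) // my-bucket photos/.
	// path.Join would have cleaned the "." away before Split ever ran.
}
```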
@@ -1500,6 +1500,9 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		awsConfig.WithEndpoint(opt.Endpoint)
 	}
 
+	// Allow URI with "." etc
+	awsConfig.DisableRestProtocolURICleaning = aws.Bool(true)
+
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	awsSessionOpts := session.Options{
 		Config: *awsConfig,
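This setting is the behavioural core of the branch: by default aws-sdk-go's REST protocol handler path-cleans the request URI, so object keys such as "." or "a/./b" are rewritten before the request is signed. A minimal standalone sketch of the same flag; the region and client construction are placeholders, not copied from rclone's s3Connection:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder region for the sketch; rclone derives this from its options.
	cfg := aws.NewConfig().WithRegion("us-east-1")
	// Keep "." and ".." path elements intact in request URIs.
	cfg.DisableRestProtocolURICleaning = aws.Bool(true)

	sess := session.Must(session.NewSession(cfg))
	svc := s3.New(sess)
	fmt.Println(svc != nil, aws.BoolValue(cfg.DisableRestProtocolURICleaning)) // true true
}
```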
@@ -1734,7 +1737,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
 // bucket to the start.
 //
 // Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
+func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
 	if prefix != "" {
 		prefix += "/"
 	}
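The hunks that follow in s3.go apply this rename at each use site. The reason for it: once list() calls into the lib/bucket package (bucket.Join, see the later hunks), a parameter named bucket would shadow that package for the whole function body. A small standalone illustration of the same Go scoping rule using the standard path package; none of these names come from rclone:

```go
package main

import (
	"fmt"
	"path"
)

// join compiles because its parameter does not shadow the imported "path"
// package. If the parameter were renamed to "path", the call path.Join below
// would become a selector on a string and fail to compile - the same reason
// list()'s "bucket" parameter becomes "bucketName" in this branch.
func join(dir, name string) string {
	return path.Join(dir, name)
}

func main() {
	fmt.Println(join("my-bucket", "dir/file.txt")) // my-bucket/dir/file.txt
}
```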
@@ -1765,7 +1768,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	for {
 		// FIXME need to implement ALL loop
 		req := s3.ListObjectsInput{
-			Bucket:    &bucket,
+			Bucket:    &bucketName,
 			Delimiter: &delimiter,
 			Prefix:    &directory,
 			MaxKeys:   &f.opt.ListChunk,
@@ -1805,7 +1808,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		if reqErr, ok := err.(awserr.RequestFailure); ok {
 			// 301 if wrong region for bucket
 			if reqErr.StatusCode() == http.StatusMovedPermanently {
-				fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
+				fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucketName)
 				return nil
 			}
 		}
@@ -1833,7 +1836,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			}
 			remote = remote[len(prefix):]
 			if addBucket {
-				remote = path.Join(bucket, remote)
+				remote = bucket.Join(bucketName, remote)
 			}
 			if strings.HasSuffix(remote, "/") {
 				remote = remote[:len(remote)-1]
@@ -1861,7 +1864,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			remote = remote[len(prefix):]
 			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 			if addBucket {
-				remote = path.Join(bucket, remote)
+				remote = bucket.Join(bucketName, remote)
 			}
 			// is this a directory marker?
 			if isDirectory && object.Size != nil && *object.Size == 0 {
@@ -2147,7 +2150,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	req.Bucket = &dstBucket
 	req.ACL = &f.opt.ACL
 	req.Key = &dstPath
-	source := pathEscape(path.Join(srcBucket, srcPath))
+	source := pathEscape(bucket.Join(srcBucket, srcPath))
 	req.CopySource = &source
 	if f.opt.ServerSideEncryption != "" {
 		req.ServerSideEncryption = &f.opt.ServerSideEncryption
lib/bucket/bucket.go

@@ -29,6 +29,23 @@ func Split(absPath string) (bucket, bucketPath string) {
 	return absPath[:slash], absPath[slash+1:]
 }
 
+// Join joins any number of path elements into a single path, adding a
+// separating slash if necessary. Empty elements are ignored.
+//
+// Unlike path.Join this does not run path.Clean on the elements so a
+// path called "." will be preserved.
+func Join(elem ...string) (out string) {
+	for _, e := range elem {
+		if e != "" {
+			if out != "" {
+				out += "/"
+			}
+			out += e
+		}
+	}
+	return out
+}
+
 // Cache stores whether buckets are available and their IDs
 type Cache struct {
 	mu sync.Mutex // mutex to protect created and deleted
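The new Join above exists because path.Join always runs path.Clean on its result, which destroys exactly the names this branch is trying to support. A quick standard-library demonstration of that cleaning; the bucket and key names are made up:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// path.Clean removes "." elements and resolves "..", so an object that is
	// literally called "." vanishes from the joined key.
	fmt.Println(path.Join("my-bucket", "."))       // "my-bucket"
	fmt.Println(path.Join("my-bucket", "a", "..")) // "my-bucket"
	// bucket.Join (added above) is plain concatenation with "/" and would
	// return "my-bucket/." and "my-bucket/a/.." for the same inputs.
}
```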
lib/bucket/bucket_test.go

@@ -2,6 +2,7 @@ package bucket
 
 import (
 	"errors"
+	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -24,6 +25,25 @@ func TestSplit(t *testing.T) {
 	}
 }
 
+func TestJoin(t *testing.T) {
+	for _, test := range []struct {
+		in   []string
+		want string
+	}{
+		{in: []string{}, want: ""},
+		{in: []string{""}, want: ""},
+		{in: []string{"", ""}, want: ""},
+		{in: []string{"", "b"}, want: "b"},
+		{in: []string{"a", ""}, want: "a"},
+		{in: []string{"a", "b"}, want: "a/b"},
+		{in: []string{"a/b/c", "..", "."}, want: "a/b/c/../."},
+	} {
+		got := Join(test.in...)
+		what := fmt.Sprintf("Join(%q)", test.in)
+		assert.Equal(t, test.want, got, what)
+	}
+}
+
 func TestCache(t *testing.T) {
 	c := NewCache()
 	errBoom := errors.New("boom")