swift: implement --swift-use-segments-container to allow >5G files on Blomp

This switches between storing chunks in a separate container suffixed
with `_segments` (the default) and a directory in the root of the
container called `.file-segments`.

By default the `.file-segments` mode will be auto-selected if an
`auth_url` that is known to require it is detected.

If the `.file-segments` mode is in use then rclone will omit that
directory from listings.

See: https://forum.rclone.org/t/blomp-unable-to-upload-5gb-files/42498/
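
To illustrate (a sketch only: the flag and config key are introduced
by this commit, but the remote name `blomp` and the container name are
invented):

    # per command, on the command line
    rclone copy ./bigfile blomp:mycontainer --swift-use-segments-container=false

    # or persistently, in the remote's config section
    [blomp]
    type = swift
    use_segments_container = false

With providers rclone knows about (eg Blomp) this shouldn't normally
be needed, as the `.file-segments` mode is auto-selected from the
`auth_url`.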
Nick Craig-Wood 2024-03-11 18:33:11 +00:00
parent 8a18c29835
commit 5bf70c68f1
3 changed files with 225 additions and 202 deletions

backend/swift/swift.go

@@ -9,11 +9,12 @@ import (
     "fmt"
     "io"
     "path"
+    "regexp"
     "strconv"
     "strings"
+    "sync"
     "time"
 
-    "github.com/google/uuid"
     "github.com/ncw/swift/v2"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config"
@@ -28,6 +29,7 @@ import (
     "github.com/rclone/rclone/lib/bucket"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/pacer"
+    "github.com/rclone/rclone/lib/random"
     "github.com/rclone/rclone/lib/readers"
 )
 
@@ -37,29 +39,42 @@ const (
     listChunks       = 1000 // chunk size to read directory listings
     defaultChunkSize = 5 * fs.Gibi
     minSleep         = 10 * time.Millisecond // In case of error, start at 10ms sleep.
+
+    segmentsContainerSuffix = "_segments"
+    segmentsDirectory       = ".file-segments"
+    segmentsDirectorySlash  = segmentsDirectory + "/"
 )
 
+// Auth URLs which imply using fileSegmentsDirectory
+var needFileSegmentsDirectory = regexp.MustCompile(`(?s)\.(ain?\.net|blomp\.com|praetector\.com|signmy\.name|rackfactory\.com)($|/)`)
+
 // SharedOptions are shared between swift and backends which depend on swift
 var SharedOptions = []fs.Option{{
     Name: "chunk_size",
-    Help: `Above this size files will be chunked into a _segments container.
-
-Above this size files will be chunked into a _segments container. The
-default for this is 5 GiB which is its maximum value.`,
+    Help: strings.ReplaceAll(`Above this size files will be chunked.
+
+Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
+or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
+for more info). Default for this is 5 GiB which is its maximum value, which
+means only files above this size will be chunked.
+
+Rclone uploads chunked files as dynamic large objects (DLO).
+`, "|", "`"),
     Default:  defaultChunkSize,
     Advanced: true,
 }, {
     Name: "no_chunk",
-    Help: `Don't chunk files during streaming upload.
-
-When doing streaming uploads (e.g. using rcat or mount) setting this
-flag will cause the swift backend to not upload chunked files.
-
-This will limit the maximum upload size to 5 GiB. However non chunked
-files are easier to deal with and have an MD5SUM.
-
-Rclone will still chunk files bigger than chunk_size when doing normal
-copy operations.`,
+    Help: strings.ReplaceAll(`Don't chunk files during streaming upload.
+
+When doing streaming uploads (e.g. using |rcat| or |mount| with
+|--vfs-cache-mode off|) setting this flag will cause the swift backend
+to not upload chunked files.
+
+This will limit the maximum streamed upload size to 5 GiB. This is
+useful because non chunked files are easier to deal with and have an
+MD5SUM.
+
+Rclone will still chunk files bigger than |chunk_size| when doing
+normal copy operations.`, "|", "`"),
     Default:  false,
     Advanced: true,
 }, {
@@ -67,11 +82,12 @@ copy operations.`,
     Help: strings.ReplaceAll(`Disable support for static and dynamic large objects
 
 Swift cannot transparently store files bigger than 5 GiB. There are
-two schemes for doing that, static or dynamic large objects, and the
-API does not allow rclone to determine whether a file is a static or
-dynamic large object without doing a HEAD on the object. Since these
-need to be treated differently, this means rclone has to issue HEAD
-requests for objects for example when reading checksums.
+two schemes for chunking large files, static large objects (SLO) or
+dynamic large objects (DLO), and the API does not allow rclone to
+determine whether a file is a static or dynamic large object without
+doing a HEAD on the object. Since these need to be treated
+differently, this means rclone has to issue HEAD requests for objects
+for example when reading checksums.
 
 When |no_large_objects| is set, rclone will assume that there are no
 static or dynamic large objects stored. This means it can stop doing
@@ -82,12 +98,40 @@ Setting this option implies |no_chunk| and also that no files will be
 uploaded in chunks, so files bigger than 5 GiB will just fail on
 upload.
 
-If you set this option and there *are* static or dynamic large objects,
+If you set this option and there **are** static or dynamic large objects,
 then this will give incorrect hashes for them. Downloads will succeed,
 but other operations such as Remove and Copy will fail.
 `, "|", "`"),
     Default:  false,
     Advanced: true,
+}, {
+    Name: "use_segments_container",
+    Help: strings.ReplaceAll(`Choose destination for large object segments
+
+Swift cannot transparently store files bigger than 5 GiB and rclone
+will chunk files larger than |chunk_size| (default 5 GiB) in order to
+upload them.
+
+If this value is |true| the chunks will be stored in an additional
+container named the same as the destination container but with
+|`+segmentsContainerSuffix+`| appended. This means that there won't be any duplicated
+data in the original container but having another container may not be
+acceptable.
+
+If this value is |false| the chunks will be stored in a
+|`+segmentsDirectory+`| directory in the root of the container. This
+directory will be omitted when listing the container. Some
+providers (eg Blomp) require this mode as creating additional
+containers isn't allowed. If it is desired to see the |`+segmentsDirectory+`|
+directory in the root then this flag must be set to |true|.
+
+If this value is |unset| (the default), then rclone will choose the value
+to use. It will be |false| unless rclone detects any |auth_url|s that
+it knows need it to be |true|. In this case you'll see a message in
+the DEBUG log.
+`, "|", "`"),
+    Default:  fs.Tristate{},
+    Advanced: true,
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,
@@ -262,6 +306,7 @@ type Options struct {
     ChunkSize            fs.SizeSuffix        `config:"chunk_size"`
     NoChunk              bool                 `config:"no_chunk"`
     NoLargeObjects       bool                 `config:"no_large_objects"`
+    UseSegmentsContainer fs.Tristate          `config:"use_segments_container"`
     Enc                  encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -506,6 +551,11 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
         BucketBasedRootOK: true,
         SlowModTime:       true,
     }).Fill(ctx, f)
+    if !f.opt.UseSegmentsContainer.Valid {
+        f.opt.UseSegmentsContainer.Value = !needFileSegmentsDirectory.MatchString(opt.Auth)
+        f.opt.UseSegmentsContainer.Valid = true
+        fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
+    }
     if f.rootContainer != "" && f.rootDirectory != "" {
         // Check to see if the object exists - ignoring directory markers
         var info swift.Object
@@ -627,6 +677,10 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
     if err == nil {
         for i := range objects {
             object := &objects[i]
+            if !includeDirMarkers && !f.opt.UseSegmentsContainer.Value && (object.Name == segmentsDirectory || strings.HasPrefix(object.Name, segmentsDirectorySlash)) {
+                // Don't show segments in listing unless showing directory markers
+                continue
+            }
             isDirectory := false
             if !recurse {
                 isDirectory = strings.HasSuffix(object.Name, "/")
@@ -965,8 +1019,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         return nil, err
     }
     if isLargeObject {
-        /*handle large object*/
-        err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
+        // handle large object
+        err = f.copyLargeObject(ctx, srcObj, dstContainer, dstPath)
     } else {
         srcContainer, srcPath := srcObj.split()
         err = f.pacer.Call(func() (bool, error) {
@@ -981,102 +1035,124 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     return f.NewObject(ctx, remote)
 }
 
-func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
-    segmentsContainer := dstContainer + "_segments"
-    err := f.makeContainer(ctx, segmentsContainer)
-    if err != nil {
-        return err
-    }
-    segments, err := src.getSegmentsLargeObject(ctx)
-    if err != nil {
-        return err
-    }
-    if len(segments) == 0 {
-        return errors.New("could not copy object, list segments are empty")
-    }
-    nanoSeconds := time.Now().Nanosecond()
-    prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
-    copiedSegmentsLen := 10
-    for _, value := range segments {
-        if len(value) <= 0 {
-            continue
-        }
-        fragment := value[0]
-        if len(fragment) <= 0 {
-            continue
-        }
-        copiedSegmentsLen = len(value)
-        firstIndex := strings.Index(fragment, "/")
-        if firstIndex < 0 {
-            firstIndex = 0
-        } else {
-            firstIndex = firstIndex + 1
-        }
-        lastIndex := strings.LastIndex(fragment, "/")
-        if lastIndex < 0 {
-            lastIndex = len(fragment)
-        } else {
-            lastIndex = lastIndex - 1
-        }
-        prefixSegment = fragment[firstIndex:lastIndex]
-        break
-    }
-    copiedSegments := make([]string, copiedSegmentsLen)
-    defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
-    for c, ss := range segments {
-        if len(ss) <= 0 {
-            continue
-        }
-        for _, s := range ss {
-            lastIndex := strings.LastIndex(s, "/")
-            if lastIndex <= 0 {
-                lastIndex = 0
-            } else {
-                lastIndex = lastIndex + 1
-            }
-            segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
-            err = f.pacer.Call(func() (bool, error) {
-                var rxHeaders swift.Headers
-                rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
-                copiedSegments = append(copiedSegments, segmentName)
-                return shouldRetryHeaders(ctx, rxHeaders, err)
-            })
-            if err != nil {
-                return err
-            }
-        }
-    }
-    m := swift.Metadata{}
-    headers := m.ObjectHeaders()
-    headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
-    headers["Content-Length"] = "0"
+// Represents a segmented upload or copy
+type segmentedUpload struct {
+    f            *Fs        // parent
+    dstContainer string     // container for the file to live once uploaded
+    container    string     // container for the segments
+    dstPath      string     // path for the object to live once uploaded
+    path         string     // unique path for the segments
+    mu           sync.Mutex // protects the variables below
+    segments     []string   // segments successfully uploaded
+}
+
+// Create a new segmented upload using the correct container and path
+func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPath string) (su *segmentedUpload, err error) {
+    randomString, err := random.Password(128)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create random string for upload: %w", err)
+    }
+    uniqueString := time.Now().Format("2006-01-02-150405") + "-" + randomString
+    su = &segmentedUpload{
+        f:            f,
+        dstPath:      dstPath,
+        path:         dstPath + "/" + uniqueString,
+        dstContainer: dstContainer,
+        container:    dstContainer,
+    }
+    if f.opt.UseSegmentsContainer.Value {
+        su.container += segmentsContainerSuffix
+        err = f.makeContainer(ctx, su.container)
+        if err != nil {
+            return nil, err
+        }
+    } else {
+        su.path = segmentsDirectorySlash + su.path
+    }
+    return su, nil
+}
+
+// Return the path of the i-th segment
+func (su *segmentedUpload) segmentPath(i int) string {
+    return fmt.Sprintf("%s/%08d", su.path, i)
+}
+
+// Mark segment as successfully uploaded
+func (su *segmentedUpload) uploaded(segment string) {
+    su.mu.Lock()
+    defer su.mu.Unlock()
+    su.segments = append(su.segments, segment)
+}
+
+// Return the full path including the container
+func (su *segmentedUpload) fullPath() string {
+    return fmt.Sprintf("%s/%s", su.container, su.path)
+}
+
+// Remove segments when upload/copy process fails
+func (su *segmentedUpload) onFail() {
+    f := su.f
+    if f.opt.LeavePartsOnError {
+        return
+    }
+    ctx := context.Background()
+    fs.Debugf(f, "Segment operation failed: bulk deleting failed segments")
+    if len(su.container) == 0 {
+        fs.Debugf(f, "Invalid segments container")
+        return
+    }
+    if len(su.segments) == 0 {
+        fs.Debugf(f, "No segments to delete")
+        return
+    }
+    _, err := f.c.BulkDelete(ctx, su.container, su.segments)
+    if err != nil {
+        fs.Errorf(f, "Failed to bulk delete failed segments: %v", err)
+    }
+}
+
+// upload the manifest when upload is done
+func (su *segmentedUpload) uploadManifest(ctx context.Context, contentType string, headers swift.Headers) (err error) {
+    delete(headers, "Etag") // remove Etag if present as it is wrong for the manifest
+    headers["X-Object-Manifest"] = urlEncode(su.fullPath())
+    headers["Content-Length"] = "0" // set Content-Length as we know it
     emptyReader := bytes.NewReader(nil)
-    err = f.pacer.Call(func() (bool, error) {
+    fs.Debugf(su.f, "uploading manifest %q to %q", su.dstPath, su.dstContainer)
+    err = su.f.pacer.Call(func() (bool, error) {
         var rxHeaders swift.Headers
-        rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
+        rxHeaders, err = su.f.c.ObjectPut(ctx, su.dstContainer, su.dstPath, emptyReader, true, "", contentType, headers)
         return shouldRetryHeaders(ctx, rxHeaders, err)
     })
     return err
 }
 
-// remove copied segments when copy process failed
-func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
-    fs.Debugf(f, "handle copy segment fail")
-    if err == nil {
-        return
-    }
-    if len(segmentsContainer) == 0 {
-        fs.Debugf(f, "invalid segments container")
-        return
-    }
-    if len(segments) == 0 {
-        fs.Debugf(f, "segments is empty")
-        return
-    }
-    fs.Debugf(f, "action delete segments what copied")
-    for _, v := range segments {
-        _ = f.c.ObjectDelete(ctx, segmentsContainer, v)
-    }
+// Copy a large object src into (dstContainer, dstPath)
+func (f *Fs) copyLargeObject(ctx context.Context, src *Object, dstContainer string, dstPath string) (err error) {
+    su, err := f.newSegmentedUpload(ctx, dstContainer, dstPath)
+    if err != nil {
+        return err
+    }
+    srcSegmentsContainer, srcSegments, err := src.getSegmentsLargeObject(ctx)
+    if err != nil {
+        return fmt.Errorf("copy large object: %w", err)
+    }
+    if len(srcSegments) == 0 {
+        return errors.New("could not copy object, list segments are empty")
+    }
+    defer atexit.OnError(&err, su.onFail)()
+    for i, srcSegment := range srcSegments {
+        dstSegment := su.segmentPath(i)
+        err = f.pacer.Call(func() (bool, error) {
+            var rxHeaders swift.Headers
+            rxHeaders, err = f.c.ObjectCopy(ctx, srcSegmentsContainer, srcSegment, su.container, dstSegment, nil)
+            return shouldRetryHeaders(ctx, rxHeaders, err)
+        })
+        if err != nil {
+            return err
+        }
+        su.uploaded(dstSegment)
+    }
+    return su.uploadManifest(ctx, src.contentType, src.headers)
 }
 
 // Hashes returns the supported hash sets.
@@ -1308,35 +1384,30 @@ func min(x, y int64) int64 {
     return y
 }
 
-func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
+// Get the segments for a large object
+//
+// It returns the names of the segments and the container that they live in
+func (o *Object) getSegmentsLargeObject(ctx context.Context) (container string, segments []string, err error) {
     container, objectName := o.split()
-    segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
+    container, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
     if err != nil {
-        fs.Debugf(o, "Failed to get list segments of object: %v", err)
-        return nil, err
+        return container, segments, fmt.Errorf("failed to get list segments of object: %w", err)
     }
-    var containerSegments = make(map[string][]string)
-    for _, segment := range segmentObjects {
-        if _, ok := containerSegments[segmentContainer]; !ok {
-            containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
-        }
-        segments := containerSegments[segmentContainer]
-        segments = append(segments, segment.Name)
-        containerSegments[segmentContainer] = segments
+    segments = make([]string, len(segmentObjects))
+    for i := range segmentObjects {
+        segments[i] = segmentObjects[i].Name
     }
-    return containerSegments, nil
+    return container, segments, nil
 }
 
-func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
-    if containerSegments == nil || len(containerSegments) <= 0 {
+// Remove the segments for a large object
+func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string, segments []string) error {
+    if len(segments) == 0 {
         return nil
     }
-    for container, segments := range containerSegments {
-        _, err := o.fs.c.BulkDelete(ctx, container, segments)
-        if err != nil {
-            fs.Debugf(o, "Failed to delete bulk segments %v", err)
-            return err
-        }
+    _, err := o.fs.c.BulkDelete(ctx, container, segments)
+    if err != nil {
+        return fmt.Errorf("failed to delete bulk segments: %w", err)
     }
     return nil
 }
@@ -1359,55 +1430,25 @@ func urlEncode(str string) string {
 }
 
 // updateChunks updates the existing object using chunks to a separate
-// container. It returns a string which prefixes current segments.
-func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
+// container.
+func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (err error) {
     container, containerPath := o.split()
-    segmentsContainer := container + "_segments"
-    // Create the segmentsContainer if it doesn't exist
-    var err error
-    err = o.fs.pacer.Call(func() (bool, error) {
-        var rxHeaders swift.Headers
-        _, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
-        return shouldRetryHeaders(ctx, rxHeaders, err)
-    })
-    if err == swift.ContainerNotFound {
-        headers := swift.Headers{}
-        if o.fs.opt.StoragePolicy != "" {
-            headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
-        }
-        err = o.fs.pacer.Call(func() (bool, error) {
-            err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
-            return shouldRetry(ctx, err)
-        })
-    }
+    su, err := o.fs.newSegmentedUpload(ctx, container, containerPath)
     if err != nil {
-        return "", err
+        return err
     }
     // Upload the chunks
     left := size
     i := 0
-    uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
-    segmentsPath := path.Join(containerPath, uniquePrefix)
     in := bufio.NewReader(in0)
-    segmentInfos := make([]string, 0, (size/int64(o.fs.opt.ChunkSize))+1)
-    defer atexit.OnError(&err, func() {
-        if o.fs.opt.LeavePartsOnError {
-            return
-        }
-        fs.Debugf(o, "Delete segments when err raise %v", err)
-        if len(segmentInfos) == 0 {
-            return
-        }
-        _ctx := context.Background()
-        deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
-    })()
+    defer atexit.OnError(&err, su.onFail)()
     for {
         // can we read at least one byte?
         if _, err = in.Peek(1); err != nil {
             if left > 0 {
-                return "", err // read less than expected
+                return err // read less than expected
             }
-            fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
+            fs.Debugf(o, "Uploading segments into %q seems done (%v)", su.container, err)
             break
         }
         n := int64(o.fs.opt.ChunkSize)
@@ -1417,49 +1458,20 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
             left -= n
         }
         segmentReader := io.LimitReader(in, n)
-        segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
-        fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
+        segmentPath := su.segmentPath(i)
+        fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, su.container)
         err = o.fs.pacer.CallNoRetry(func() (bool, error) {
             var rxHeaders swift.Headers
-            rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
-            if err == nil {
-                segmentInfos = append(segmentInfos, segmentPath)
-            }
+            rxHeaders, err = o.fs.c.ObjectPut(ctx, su.container, segmentPath, segmentReader, true, "", "", headers)
             return shouldRetryHeaders(ctx, rxHeaders, err)
         })
         if err != nil {
-            return "", err
+            return err
         }
+        su.uploaded(segmentPath)
         i++
     }
-    // Upload the manifest
-    headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
-    headers["Content-Length"] = "0" // set Content-Length as we know it
-    emptyReader := bytes.NewReader(nil)
-    err = o.fs.pacer.Call(func() (bool, error) {
-        var rxHeaders swift.Headers
-        rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
-        return shouldRetryHeaders(ctx, rxHeaders, err)
-    })
-    if err == nil {
-        //reset data
-        segmentInfos = nil
-    }
-    return uniquePrefix + "/", err
-}
-
-func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
-    if len(segmentInfos) == 0 {
-        return
-    }
-    for _, v := range segmentInfos {
-        fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
-        e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
-        if e != nil {
-            fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
-        }
-    }
+    return su.uploadManifest(ctx, contentType, headers)
 }
 
 // Update the object with the contents of the io.Reader, modTime and size
@@ -1483,10 +1495,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return err
     }
 
-    //capture segments before upload
-    var segmentsContainer map[string][]string
+    // Capture segments before upload
+    var segmentsContainer string
+    var segments []string
     if isLargeObject {
-        segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
+        segmentsContainer, segments, _ = o.getSegmentsLargeObject(ctx)
     }
 
     // Set the mtime
@@ -1497,7 +1510,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     fs.OpenOptionAddHeaders(options, headers)
 
     if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
-        _, err = o.updateChunks(ctx, in, headers, size, contentType)
+        err = o.updateChunks(ctx, in, headers, size, contentType)
         if err != nil {
             return err
         }
@@ -1535,7 +1548,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     if isLargeObject && len(segmentsContainer) > 0 {
         isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
         if !isInContainerVersioning {
-            err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
+            err := o.removeSegmentsLargeObject(ctx, segmentsContainer, segments)
             if err != nil {
                 fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
             }
@@ -1563,10 +1576,11 @@ func (o *Object) Remove(ctx context.Context) (err error) {
             return err
         }
     }
-    //capture segments object if this object is large object
-    var containerSegments map[string][]string
+    // Capture segments object if this object is large object
+    var segmentsContainer string
+    var segments []string
     if isLargeObject {
-        containerSegments, err = o.getSegmentsLargeObject(ctx)
+        segmentsContainer, segments, err = o.getSegmentsLargeObject(ctx)
         if err != nil {
             return err
         }
@@ -1589,7 +1603,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
     }
 
     if isLargeObject {
-        return o.removeSegmentsLargeObject(ctx, containerSegments)
+        return o.removeSegmentsLargeObject(ctx, segmentsContainer, segments)
     }
     return nil
 }
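
To make the two modes concrete, here is a sketch of where the segments
of a chunked upload of `mycontainer/backup/big.iso` land (paths follow
segmentPath/fullPath above; `<unique>` stands for the timestamp plus
random string generated in newSegmentedUpload, and all names are
illustrative). In either mode the original object is a zero-byte
manifest whose X-Object-Manifest header points at the segment prefix:

    # use_segments_container = true
    mycontainer/backup/big.iso                    <- zero-byte manifest
    mycontainer_segments/backup/big.iso/<unique>/00000000
    mycontainer_segments/backup/big.iso/<unique>/00000001

    # use_segments_container = false (eg Blomp)
    mycontainer/backup/big.iso                    <- zero-byte manifest
    mycontainer/.file-segments/backup/big.iso/<unique>/00000000
    mycontainer/.file-segments/backup/big.iso/<unique>/00000001

In the second mode, the check added to listContainerRoot above is what
keeps the `.file-segments` directory out of listings.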

backend/swift/swift_internal_test.go

@@ -118,6 +118,9 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
     f.opt.NoChunk = false
     f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
     segmentContainer := f.root + "_segments"
+    if !f.opt.UseSegmentsContainer.Value {
+        segmentContainer = f.root
+    }
     defer func() {
         //restore config
         f.opt.ChunkSize = preConfChunkSize
@@ -147,6 +150,9 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
     _, _, err = f.c.Object(ctx, f.rootContainer, path)
     assert.Equal(t, swift.ObjectNotFound, err)
     prefix := path
+    if !f.opt.UseSegmentsContainer.Value {
+        prefix = segmentsDirectory + "/" + prefix
+    }
     objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{
         Prefix: prefix,
     })

fstest/test_all/config.yaml

@@ -278,6 +278,9 @@ backends:
   - backend: "swift"
     remote: "TestSwiftAIO:"
     fastlist: true
+  - backend: "swift"
+    remote: "TestSwiftAIO,use_segments_container=false:"
+    fastlist: true
   - backend: "swift"
     remote: "TestSwift:"
     fastlist: true
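
For reference, a sketch of exercising the new test remote by hand
(following rclone's usual -remote test convention; the exact
invocation is illustrative and assumes a running TestSwiftAIO server):

    cd backend/swift
    go test -v -remote "TestSwiftAIO,use_segments_container=false:"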