package chunker

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"path"
	"regexp"
	"strings"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Command line flags
var (
	UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
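
// These internal tests are dispatched through (*Fs).InternalTest at the
// bottom of this file. A typical invocation of the large upload test is
// sketched below; the exact remote name and run filter depend on how the
// fstest harness is configured:
//
//	go test -v -remote TestChunkerLocal: -run TestIntegration -upload-kilobytes 1000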

// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
	t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
			Size:    int64(kilobytes) * int64(fs.Kibi),
		})
	})
}

type settings map[string]interface{}

func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
	configMap := configmap.Simple{}
	for key, val := range opts {
		configMap[key] = fmt.Sprintf("%v", val)
	}
	rpath := fspath.JoinRootPath(f.Root(), path)
	remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
	fixFs, err := fs.NewFs(ctx, remote)
	require.NoError(t, err)
	return fixFs
}
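
// deriveFs builds a connection-string remote of the form
// "name,key1=val1,key2=val2:root/path", so a call like
//
//	deriveFs(ctx, t, f, "md5all", settings{"hash_type": "md5all"})
//
// reopens the same chunker remote with the given options overridden
// (testMD5AllSlow below uses exactly this mechanism).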

var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")

func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
	item := fstest.Item{Path: name, ModTime: mtime1}
	obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
	assert.NotNil(t, obj, message)
	return obj
}

// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
	saveOpt := f.opt
	defer func() {
		// restore original settings (f is pointer, f.opt is struct)
		f.opt = saveOpt
		_ = f.setChunkNameFormat(f.opt.NameFormat)
	}()

	assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
		assert.Equal(t, wantDataFormat, f.dataNameFmt)
		assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
		assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
	}

	assertFormatValid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
	}

	assertFormatInvalid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.Error(t, err)
	}

	assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
		gotChunkName := ""
		assert.NotPanics(t, func() {
			gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
		}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
		if gotChunkName != "" {
			assert.Equal(t, wantChunkName, gotChunkName)
		}
	}

	assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
		assert.Panics(t, func() {
			_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
		}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
	}

	assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
		gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
		assert.Equal(t, wantMainName, gotMainName)
		assert.Equal(t, wantChunkNo, gotChunkNo)
		assert.Equal(t, wantCtrlType, gotCtrlType)
		assert.Equal(t, wantXactID, gotXactID)
	}

	const newFormatSupported = false // support for patterns not starting with base name (*)
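
	// A quick legend for the assertions below: in a pattern, `*` stands for
	// the base file name and the run of `#` for the decimal chunk number
	// (offset by f.opt.StartFrom and zero-padded to the number of hashes).
	// Control chunks replace the number with an underscore plus a short
	// lowercase type such as `_info`, and either a 4-9 character transaction
	// ID (`_xxxx`) or a legacy `..tmp_NNNNNNNNNN` suffix may follow.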

	// valid formats
	assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	if newFormatSupported {
		assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	}

	// invalid formats
	assertFormatInvalid(`chunk-#`)
	assertFormatInvalid(`*-chunk`)
	assertFormatInvalid(`*-*-chunk-#`)
	assertFormatInvalid(`*-chunk-#-#`)
	assertFormatInvalid(`#-chunk-*`)
	assertFormatInvalid(`*/#`)

	assertFormatValid(`*#`)
	assertFormatInvalid(`**#`)
	assertFormatInvalid(`#*`)
	assertFormatInvalid(``)
	assertFormatInvalid(`-`)

	// quick tests
	if newFormatSupported {
		assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
		f.opt.StartFrom = 1

		assertMakeName(`part_fish_1`, "fish", 0, "", "")
		assertParseName(`part_fish_43`, "fish", 42, "", "")
		assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
		assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
		assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
		assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
		assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
		assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
		assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
		assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")

		// old-style temporary suffix (parse only)
		assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
		assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
	}

	// prepare format for long tests
	assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	f.opt.StartFrom = 2

	// valid data chunks
	assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
	assertParseName(`fish.chunk.003`, "fish", 1, "", "")
	assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
	assertParseName(`fish.chunk.021`, "fish", 19, "", "")

	// valid temporary data chunks
	assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
	assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
	assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
	assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
	assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
	assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
	assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
	assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")

	// valid temporary data chunks (old temporary suffix, only parse)
	assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
	assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
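
	// The legacy decimal `..tmp_` suffix above is surfaced as the equivalent
	// base-36 transaction ID padded to at least 4 characters: 47 decimal is
	// "1b" in base 36, hence "001b".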

	// parsing invalid data chunk names
	assertParseName(`fish.chunk.3`, "", -1, "", "")
	assertParseName(`fish.chunk.001`, "", -1, "", "")
	assertParseName(`fish.chunk.21`, "", -1, "", "")
	assertParseName(`fish.chunk.-21`, "", -1, "", "")

	assertParseName(`fish.chunk.004abcd`, "", -1, "", "")        // missing underscore delimiter
	assertParseName(`fish.chunk.004__1234`, "", -1, "", "")      // extra underscore delimiter
	assertParseName(`fish.chunk.004_123`, "", -1, "", "")        // too short temporary suffix
	assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
	assertParseName(`fish.chunk.004_-1234`, "", -1, "", "")      // temporary suffix must be positive
	assertParseName(`fish.chunk.004_123E`, "", -1, "", "")       // uppercase not allowed
	assertParseName(`fish.chunk.004_12.3`, "", -1, "", "")       // punctuation not allowed

	// parsing invalid data chunk names (old temporary suffix)
	assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
	assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
	assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
	assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
	assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")

	// valid control chunks
	assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
	assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
	assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
	assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")

	assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
	assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
	assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
	assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")

	// valid temporary control chunks
	assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
	assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
	assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
	assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
	assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")

	assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
	assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
	assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
	assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
	assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")

	// valid temporary control chunks (old temporary suffix, parse only)
	assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
	assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
	assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
	assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")

	// parsing invalid control chunk names
	assertParseName(`fish.chunk.metadata`, "", -1, "", "") // control type must start with an underscore
	assertParseName(`fish.chunk.info`, "", -1, "", "")
	assertParseName(`fish.chunk.locks`, "", -1, "", "")
	assertParseName(`fish.chunk.uploads`, "", -1, "", "")

	assertParseName(`fish.chunk._os`, "", -1, "", "")        // too short
	assertParseName(`fish.chunk._metadata`, "", -1, "", "")  // too long
	assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
	assertParseName(`fish.chunk._4me`, "", -1, "", "")       // cannot start with digit
	assertParseName(`fish.chunk._567`, "", -1, "", "")       // cannot be all digits
	assertParseName(`fish.chunk._me_ta`, "", -1, "", "")     // punctuation not allowed
	assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
	assertParseName(`fish.chunk._.bin`, "", -1, "", "")
	assertParseName(`fish.chunk._.2xy`, "", -1, "", "")

	// parsing invalid temporary control chunks
	assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "")     // missing underscore delimiter
	assertParseName(`fish.chunk._info__1234`, "", -1, "", "")      // extra underscore delimiter
	assertParseName(`fish.chunk._info_123`, "", -1, "", "")        // too short temporary suffix
	assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
	assertParseName(`fish.chunk._info_-1234`, "", -1, "", "")      // temporary suffix must be positive
	assertParseName(`fish.chunk._info_123E`, "", -1, "", "")       // uppercase not allowed
	assertParseName(`fish.chunk._info_12.3`, "", -1, "", "")       // punctuation not allowed

	assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
	assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
	assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")

	// short control chunk names: 3 letters ok, 1-2 letters not allowed
	assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
	assertParseName(`fish.chunk._int`, "fish", -1, "int", "")

	assertMakeNamePanics("fish", -1, "in", "")
	assertMakeNamePanics("fish", -1, "up", "4")
	assertMakeNamePanics("fish", -1, "x", "")
	assertMakeNamePanics("fish", -1, "c", "1z")

	assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
	assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
	assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
	assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
	assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
	assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")

	assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
	assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
	assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
	assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
	assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
	assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")

	// base file name can sometimes look like a valid chunk name
	assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
	assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
	assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")

	assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
	assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
	assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")

	// base file name looking like a valid chunk name (old temporary suffix)
	assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
	assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
	assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
	assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

	assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
	assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")

	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")

	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
	assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
	assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

	// attempts to make invalid chunk names
	assertMakeNamePanics("fish", -1, "", "")          // neither data nor control
	assertMakeNamePanics("fish", 0, "info", "")       // both data and control
	assertMakeNamePanics("fish", -1, "metadata", "")  // control type too long
	assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
	assertMakeNamePanics("fish", -1, "2xy", "")       // first digit not allowed
	assertMakeNamePanics("fish", -1, "123", "")       // all digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", "")      // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", "")     // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", "")
	assertMakeNamePanics("fish", -1, "info_", "")
	assertMakeNamePanics("fish", -2, ".bind", "")
	assertMakeNamePanics("fish", -2, "bind.", "")

	assertMakeNamePanics("fish", -1, "", "1")          // neither data nor control
	assertMakeNamePanics("fish", 0, "info", "23")      // both data and control
	assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
	assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
	assertMakeNamePanics("fish", -1, "2xy", "abc")     // first digit not allowed
	assertMakeNamePanics("fish", -1, "123", "def")     // all digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", "mnk")    // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", "xyz")   // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", "5678")
	assertMakeNamePanics("fish", -1, "info_", "999")
	assertMakeNamePanics("fish", -2, ".bind", "0")
	assertMakeNamePanics("fish", -2, "bind.", "0")

	assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
	assertMakeNamePanics("fish", 0, "", "123F4")      // uppercase not allowed
	assertMakeNamePanics("fish", 0, "", "123.")       // punctuation not allowed
	assertMakeNamePanics("fish", 0, "", "_123")
}
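
// testSmallFileInternals inspects the internal layout of small files:
// depending on the meta format and hash mode they are stored either as a
// bare object in Object.main or as a single chunk plus a meta object.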
func testSmallFileInternals(t *testing.T, f *Fs) {
	const dir = "small"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	checkSmallFileInternals := func(obj fs.Object) {
		assert.NotNil(t, obj)
		o, ok := obj.(*Object)
		assert.True(t, ok)
		assert.NotNil(t, o)
		if o == nil {
			return
		}
		switch {
		case !f.useMeta:
			// If meta format is "none", a non-chunked file (even an empty one)
			// is internally a single chunk without a meta object.
			assert.Nil(t, o.main)
			assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
			assert.Equal(t, 1, len(o.chunks))
		case f.hashAll:
			// Consistent hashing forces a meta object on small files too
			assert.NotNil(t, o.main)
			assert.True(t, o.isComposite())
			assert.Equal(t, 1, len(o.chunks))
		default:
			// normally a non-chunked file is kept in the Object's main field
			assert.NotNil(t, o.main)
			assert.False(t, o.isComposite())
			assert.Equal(t, 0, len(o.chunks))
		}
	}

	checkContents := func(obj fs.Object, contents string) {
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size())

		r, err := obj.Open(ctx)
		assert.NoError(t, err)
		assert.NotNil(t, r)
		if r == nil {
			return
		}
		data, err := io.ReadAll(r)
		assert.NoError(t, err)
		assert.Equal(t, contents, string(data))
		_ = r.Close()
	}

	checkHashsum := func(obj fs.Object) {
		var ht hash.Type
		switch {
		case !f.hashAll:
			return
		case f.useMD5:
			ht = hash.MD5
		case f.useSHA1:
			ht = hash.SHA1
		default:
			return
		}
		// even empty files must have a hashsum in consistent mode
		sum, err := obj.Hash(ctx, ht)
		assert.NoError(t, err)
		assert.NotEqual(t, sum, "")
	}

	checkSmallFile := func(name, contents string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
		assert.NotNil(t, put)
		checkSmallFileInternals(put)
		checkContents(put, contents)
		checkHashsum(put)

		// objects returned by Put and NewObject must have similar structure
		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err)
		assert.NotNil(t, obj)
		checkSmallFileInternals(obj)
		checkContents(obj, contents)
		checkHashsum(obj)

		_ = obj.Remove(ctx)
		_ = put.Remove(ctx) // for good
	}

	checkSmallFile("emptyfile", "")
	checkSmallFile("smallfile", "Ok")
}
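
// testPreventCorruption verifies that chunker refuses operations (Mkdir,
// Move, Copy, Put, Update, Remove) that would clobber live chunks of an
// existing composite file, and that strict (fail_hard) mode also forbids
// direct access to chunk names.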
func testPreventCorruption(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "corrupted"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = true

	contents := random.String(250)
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	const overlapMessage = "chunk overlap"

	assertOverlapError := func(err error) {
		assert.Error(t, err)
		if err != nil {
			assert.Contains(t, err.Error(), overlapMessage)
		}
	}

	newFile := func(name string) fs.Object {
		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj
	}
	billyObj := newFile("billy")
	billyTxn := billyObj.(*Object).xactID
	if f.useNoRename {
		require.True(t, billyTxn != "")
	} else {
		require.True(t, billyTxn == "")
	}

	billyChunkName := func(chunkNo int) string {
		return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
	}

	err := f.Mkdir(ctx, billyChunkName(1))
	assertOverlapError(err)

	_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
	_, err = f.base.NewObject(ctx, billyChunk4Name)
	require.NoError(t, err)
	_, err = f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	f.opt.FailHard = false
	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

	f.opt.FailHard = true
	_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
	assertOverlapError(err)

	// you can freely read chunks (if you have an object)
	r, err := billyChunk4.Open(ctx)
	assert.NoError(t, err)
	var chunkContents []byte
	assert.NotPanics(t, func() {
		chunkContents, err = io.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
	assert.NotEqual(t, contents, string(chunkContents))

	// but you can't change them
	err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
	assertOverlapError(err)

	// Remove isn't special: you can't corrupt files even if you have an object
	err = billyChunk4.Remove(ctx)
	assertOverlapError(err)

	// use a fresh file ("willy") in case billy was corrupted in any way
	willyObj := newFile("willy")
	willyTxn := willyObj.(*Object).xactID
	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
	assert.NoError(t, err)
	require.NotNil(t, willyChunk)

	_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
	assertOverlapError(err)

	// operations.Move will return an error when chunker's Move refuses to
	// corrupt the target file, but it falls back to the copy/delete method,
	// which still tries to delete the target chunk. Chunker must come to
	// the rescue.
	_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
	assertOverlapError(err)
	r, err = willyChunk.Open(ctx)
	assert.NoError(t, err)
	assert.NotPanics(t, func() {
		_, err = io.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
}
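
// testChunkNumberOverflow plants a chunk with an absurdly large number
// next to a normal composite file and checks that the damaged file is
// hidden from listings and NewObject returns an error, while fail_hard
// mode makes List itself fail.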
func testChunkNumberOverflow(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "wreaked"
	const wreakNumber = 10200300
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

	newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
		filename = path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
			txnID = chunkObj.xactID
		}
		return
	}

	f.opt.FailHard = false
	file, fileName, fileTxn := newFile(f, "wreaker")
	wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.Error(t, err)
	_, err = f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = false
	_ = wreak.Remove(ctx)
	_ = file.Remove(ctx)
}
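
// testMetadataInput uploads files whose contents look like chunker
// metadata (current, past and future simplejson flavours) and checks that
// they are treated as ordinary user data: stored as forced composite
// files and read back byte for byte.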
func testMetadataInput(t *testing.T, f *Fs) {
	const minChunkForTest = 50
	if f.opt.ChunkSize < minChunkForTest {
		t.Skip("this test requires chunks that fit metadata")
	}

	const dir = "usermeta"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

		part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
		_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

		o, ok := obj.(*Object)
		assert.True(t, ok)
		if o != nil {
			assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
			o = nil
		}

		defer func() {
			_ = obj.Remove(ctx)
			_ = part.Remove(ctx)
		}()

		r, err := obj.Open(ctx)
		assert.NoError(t, err, "open "+description)
		assert.NotNil(t, r, "open stream of "+description)
		if err == nil && r != nil {
			data, err := io.ReadAll(r)
			assert.NoError(t, err, "read all of "+description)
			assert.Equal(t, contents, string(data), description+" contents is ok")
			_ = r.Close()
		}
	}

	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")

	pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
	pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
	runSubtest(pastMeta, "past")

	futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
	futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
	runSubtest(futureMeta, "future")
}

// Test that chunker refuses to modify objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
	if !f.useMeta {
		t.Skip("this test requires metadata support")
	}

	saveOpt := f.opt
	ctx := context.Background()
	f.opt.FailHard = true
	const dir = "future"
	const file = dir + "/test"
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	putPart := func(name string, part int, data, msg string) {
		if part > 0 {
			name = f.makeChunkName(name, part-1, "", "")
		}
		item := fstest.Item{Path: name, ModTime: modTime}
		obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
		assert.NotNil(t, obj, msg)
	}

	// simulate a chunked object from the future
	meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
	putPart(file, 0, meta, "metaobject")
	putPart(file, 1, "abc", "chunk1")
	putPart(file, 2, "def", "chunk2")
	putPart(file, 3, "ghi", "chunk3")

	// List should succeed
	ls, err := f.List(ctx, dir)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(ls))
	assert.Equal(t, int64(9), ls[0].Size())

	// NewObject should succeed
	obj, err := f.NewObject(ctx, file)
	assert.NoError(t, err)
	assert.Equal(t, file, obj.Remote())
	assert.Equal(t, int64(9), obj.Size())

	// Hash must fail
	_, err = obj.Hash(ctx, hash.SHA1)
	assert.Equal(t, ErrMetaUnknown, err)

	// Move must fail
	mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
	assert.Nil(t, mobj)
	assert.Error(t, err)
	if err != nil {
		assert.Contains(t, err.Error(), "please upgrade rclone")
	}

	// Put must fail
	oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
	buf := bytes.NewBufferString("abc")
	_, err = f.Put(ctx, buf, oi)
	assert.Error(t, err)

	// Rcat must fail
	in := io.NopCloser(bytes.NewBufferString("abc"))
	robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
	assert.Nil(t, robj)
	assert.NotNil(t, err)
	if err != nil {
		assert.Contains(t, err.Error(), "please upgrade rclone")
	}
}

// The newer method of doing transactions without renaming should still be
// able to correctly process chunks that were created with renaming.
// If you attempt the inverse, however, the data chunks will be ignored,
// causing commands to behave incorrectly.
func testBackwardsCompatibility(t *testing.T, f *Fs) {
	if !f.useMeta {
		t.Skip("Can't do norename transactions without metadata")
	}
	const dir = "backcomp"
	ctx := context.Background()
	saveOpt := f.opt
	saveUseNoRename := f.useNoRename
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
		f.useNoRename = saveUseNoRename
	}()
	f.opt.ChunkSize = fs.SizeSuffix(10)

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(250)
	newFile := func(f fs.Fs, name string) (fs.Object, string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj, filename
	}

	f.opt.FailHard = false
	f.useNoRename = false
	file, fileName := newFile(f, "renamefile")

	f.opt.FailHard = false
	item := fstest.NewItem(fileName, contents, modTime)

	var items []fstest.Item
	items = append(items, item)

	f.useNoRename = true
	fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.NoError(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.NoError(t, err)

	f.opt.FailHard = false
	_ = file.Remove(ctx)
}
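
// testChunkerServerSideMove checks that a composite file written with
// rename transactions can be moved server-side into a chunker remote that
// uses norename transactions and read back intact.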
func testChunkerServerSideMove(t *testing.T, f *Fs) {
	if !f.useMeta {
		t.Skip("Can't test norename transactions without metadata")
	}

	ctx := context.Background()
	const dir = "servermovetest"
	subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)

	subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
	assert.NoError(t, err)
	fs1, isChunkerFs := subFs1.(*Fs)
	assert.True(t, isChunkerFs)
	fs1.useNoRename = false
	fs1.opt.ChunkSize = fs.SizeSuffix(3)

	subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
	assert.NoError(t, err)
	fs2, isChunkerFs := subFs2.(*Fs)
	assert.True(t, isChunkerFs)
	fs2.useNoRename = true
	fs2.opt.ChunkSize = fs.SizeSuffix(3)

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	item := fstest.Item{Path: "movefile", ModTime: modTime}
	contents := "abcdef"
	file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)

	dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
	dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
	assert.NoError(t, err)
	assert.Equal(t, int64(len(contents)), dstFile.Size())

	r, err := dstFile.Open(ctx)
	assert.NoError(t, err)
	assert.NotNil(t, r)
	data, err := io.ReadAll(r)
	assert.NoError(t, err)
	assert.Equal(t, contents, string(data))
	_ = r.Close()
	_ = operations.Purge(ctx, f.base, dir)
}

// Test that md5all creates metadata even for small files
func testMD5AllSlow(t *testing.T, f *Fs) {
	ctx := context.Background()
	fsResult := deriveFs(ctx, t, f, "md5all", settings{
		"chunk_size":   "1P",
		"name_format":  "*.#",
		"hash_type":    "md5all",
		"transactions": "rename",
		"meta_format":  "simplejson",
	})
	chunkFs, ok := fsResult.(*Fs)
	require.True(t, ok, "fs must be a chunker remote")
	baseFs := chunkFs.base
	if !baseFs.Features().SlowHash {
		t.Skipf("this test needs a base fs with slow hash, e.g. local")
	}

	assert.True(t, chunkFs.useMD5, "must use md5")
	assert.True(t, chunkFs.hashAll, "must hash all files")

	_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
	obj, err := chunkFs.NewObject(ctx, "file")
	require.NoError(t, err)
	sum, err := obj.Hash(ctx, hash.MD5)
	assert.NoError(t, err)
	assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)

	list, err := baseFs.List(ctx, "")
	require.NoError(t, err)
	assert.Equal(t, 2, len(list))
	_, err = baseFs.NewObject(ctx, "file")
	assert.NoError(t, err, "metadata must be created")
	_, err = baseFs.NewObject(ctx, "file.1")
	assert.NoError(t, err, "first chunk must be created")

	require.NoError(t, operations.Purge(ctx, baseFs, ""))
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("PutLarge", func(t *testing.T) {
		if *UploadKilobytes <= 0 {
			t.Skip("-upload-kilobytes is not set")
		}
		testPutLarge(t, f, *UploadKilobytes)
	})
	t.Run("ChunkNameFormat", func(t *testing.T) {
		testChunkNameFormat(t, f)
	})
	t.Run("SmallFileInternals", func(t *testing.T) {
		testSmallFileInternals(t, f)
	})
	t.Run("PreventCorruption", func(t *testing.T) {
		testPreventCorruption(t, f)
	})
	t.Run("ChunkNumberOverflow", func(t *testing.T) {
		testChunkNumberOverflow(t, f)
	})
	t.Run("MetadataInput", func(t *testing.T) {
		testMetadataInput(t, f)
	})
	t.Run("FutureProof", func(t *testing.T) {
		testFutureProof(t, f)
	})
	t.Run("BackwardsCompatibility", func(t *testing.T) {
		testBackwardsCompatibility(t, f)
	})
	t.Run("ChunkerServerSideMove", func(t *testing.T) {
		testChunkerServerSideMove(t, f)
	})
	t.Run("MD5AllSlow", func(t *testing.T) {
		testMD5AllSlow(t, f)
	})
}
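
// Compile-time assertion that *Fs implements fstests.InternalTester; this
// is how the integration test harness finds and runs InternalTest above.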
var _ fstests.InternalTester = (*Fs)(nil)