2017-07-03 14:05:27 +00:00
|
|
|
package info
|
|
|
|
|
|
|
|
// FIXME once translations are implemented will need a no-escape
// option for Put so we can make these tests work again
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2019-06-17 08:34:30 +00:00
|
|
|
"context"
|
2019-05-14 15:49:55 +00:00
|
|
|
"encoding/json"
|
2017-07-03 14:05:27 +00:00
|
|
|
"fmt"
|
2017-08-22 06:00:10 +00:00
|
|
|
"io"
|
2019-05-14 15:49:55 +00:00
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"regexp"
|
2017-07-03 14:05:27 +00:00
|
|
|
"sort"
|
2019-05-14 15:49:55 +00:00
|
|
|
"strconv"
|
2017-07-03 14:05:27 +00:00
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/pkg/errors"
|
2019-07-28 17:47:38 +00:00
|
|
|
"github.com/rclone/rclone/cmd"
|
2019-05-14 15:49:55 +00:00
|
|
|
"github.com/rclone/rclone/cmd/info/internal"
|
2019-07-28 17:47:38 +00:00
|
|
|
"github.com/rclone/rclone/fs"
|
2019-10-11 15:55:04 +00:00
|
|
|
"github.com/rclone/rclone/fs/config/flags"
|
2019-07-28 17:47:38 +00:00
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/fs/object"
|
2019-08-06 11:44:08 +00:00
|
|
|
"github.com/rclone/rclone/lib/random"
|
2017-07-03 14:05:27 +00:00
|
|
|
"github.com/spf13/cobra"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// Command line flags, registered on the command in init below.
	writeJSON          string        // file to write the JSON report to ("" = don't write)
	checkNormalization bool          // check UTF-8 normalization handling
	checkControl       bool          // check control characters in file names
	checkLength        bool          // check the maximum file name length
	checkStreaming     bool          // check uploads with indeterminate file size
	uploadWait         time.Duration // how long to wait after writing each test file

	// Regexps matching the names of the position test files written by
	// checkStringPositions, used by checkControlsList to decode listings.
	positionLeftRe   = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
	positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
	positionRightRe  = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`)
)
|
|
|
|
|
|
|
|
// init registers the info command and its flags with the root command.
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	// Register the command line flags backing the package level variables.
	flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
	flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
	flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
	flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
	flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
|
|
|
|
|
2019-10-11 15:58:11 +00:00
|
|
|
// commandDefinition is the cobra definition for the "rclone info" command.
var commandDefinition = &cobra.Command{
	Use:   "info [remote:path]+",
	Short: `Discovers file name or other limitations for paths.`,
	Long: `rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
`,
	Hidden: true, // hidden from the main help output
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1e6, command, args)
		// Run the checks against each remote given on the command line.
		for i := range args {
			f := cmd.NewFsDir(args[i : i+1])
			cmd.Run(false, false, command, func() error {
				return readInfo(context.Background(), f)
			})
		}
	},
}
|
|
|
|
|
|
|
|
// results holds the state and the outcome of the checks run against a
// single remote.
type results struct {
	ctx context.Context // context used for all remote operations
	f   fs.Fs           // the remote under test
	mu  sync.Mutex      // protects the maps below from concurrent goroutines
	// stringNeedsEscaping records, per test string, the positions in a
	// file name at which writing or reading the string failed.
	stringNeedsEscaping map[string]internal.Position
	// controlResults records the detailed per-string write/get/list outcome.
	controlResults map[string]internal.ControlResult
	maxFileLength  int // longest file name that could be written
	// UTF-8 normalization capabilities (see checkUTF8Normalization).
	canWriteUnnormalized bool
	canReadUnnormalized  bool
	canReadRenormalized  bool
	// canStream is true if an upload with unknown size succeeded intact.
	canStream bool
}
|
|
|
|
|
2019-06-17 08:34:30 +00:00
|
|
|
func newResults(ctx context.Context, f fs.Fs) *results {
|
2017-07-03 14:05:27 +00:00
|
|
|
return &results{
|
2019-06-17 08:34:30 +00:00
|
|
|
ctx: ctx,
|
2018-11-02 12:12:09 +00:00
|
|
|
f: f,
|
2019-05-14 15:49:55 +00:00
|
|
|
stringNeedsEscaping: make(map[string]internal.Position),
|
|
|
|
controlResults: make(map[string]internal.ControlResult),
|
2017-07-03 14:05:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Print the results to stdout
|
|
|
|
func (r *results) Print() {
|
|
|
|
fmt.Printf("// %s\n", r.f.Name())
|
|
|
|
if checkControl {
|
|
|
|
escape := []string{}
|
2018-11-02 12:12:09 +00:00
|
|
|
for c, needsEscape := range r.stringNeedsEscaping {
|
2019-05-14 15:49:55 +00:00
|
|
|
if needsEscape != internal.PositionNone {
|
|
|
|
k := strconv.Quote(c)
|
|
|
|
k = k[1 : len(k)-1]
|
|
|
|
escape = append(escape, fmt.Sprintf("'%s'", k))
|
2017-07-03 14:05:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(escape)
|
2019-05-14 15:49:55 +00:00
|
|
|
fmt.Printf("stringNeedsEscaping = []rune{\n")
|
2017-07-03 14:05:27 +00:00
|
|
|
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
|
|
|
|
fmt.Printf("}\n")
|
|
|
|
}
|
|
|
|
if checkLength {
|
|
|
|
fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
|
|
|
|
}
|
|
|
|
if checkNormalization {
|
|
|
|
fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
|
|
|
|
fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
|
|
|
|
fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
|
|
|
|
}
|
2017-08-22 06:00:10 +00:00
|
|
|
if checkStreaming {
|
|
|
|
fmt.Printf("canStream = %v\n", r.canStream)
|
|
|
|
}
|
2017-07-03 14:05:27 +00:00
|
|
|
}
|
|
|
|
|
2019-05-14 15:49:55 +00:00
|
|
|
// WriteJSON writes the results to a JSON file when requested
|
|
|
|
func (r *results) WriteJSON() {
|
|
|
|
if writeJSON == "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
report := internal.InfoReport{
|
|
|
|
Remote: r.f.Name(),
|
|
|
|
}
|
|
|
|
if checkControl {
|
|
|
|
report.ControlCharacters = &r.controlResults
|
|
|
|
}
|
|
|
|
if checkLength {
|
|
|
|
report.MaxFileLength = &r.maxFileLength
|
|
|
|
}
|
|
|
|
if checkNormalization {
|
|
|
|
report.CanWriteUnnormalized = &r.canWriteUnnormalized
|
|
|
|
report.CanReadUnnormalized = &r.canReadUnnormalized
|
|
|
|
report.CanReadRenormalized = &r.canReadRenormalized
|
|
|
|
}
|
|
|
|
if checkStreaming {
|
|
|
|
report.CanStream = &r.canStream
|
|
|
|
}
|
|
|
|
|
|
|
|
if f, err := os.Create(writeJSON); err != nil {
|
|
|
|
fs.Errorf(r.f, "Creating JSON file failed: %s", err)
|
|
|
|
} else {
|
|
|
|
defer fs.CheckClose(f, &err)
|
|
|
|
enc := json.NewEncoder(f)
|
|
|
|
enc.SetIndent("", " ")
|
|
|
|
err := enc.Encode(report)
|
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(r.f, "Writing JSON file failed: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fs.Infof(r.f, "Wrote JSON file: %s", writeJSON)
|
|
|
|
}
|
|
|
|
|
2017-07-03 14:05:27 +00:00
|
|
|
// writeFile writes a file with some random contents
|
|
|
|
func (r *results) writeFile(path string) (fs.Object, error) {
|
2019-08-06 11:44:08 +00:00
|
|
|
contents := random.String(50)
|
2018-01-12 16:30:54 +00:00
|
|
|
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
|
2019-05-14 15:49:55 +00:00
|
|
|
obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
|
|
|
|
if uploadWait > 0 {
|
|
|
|
time.Sleep(uploadWait)
|
|
|
|
}
|
|
|
|
return obj, err
|
2017-07-03 14:05:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// check whether normalization is enforced and check whether it is
|
|
|
|
// done on the files anyway
|
|
|
|
func (r *results) checkUTF8Normalization() {
|
|
|
|
unnormalized := "Héroique"
|
|
|
|
normalized := "Héroique"
|
|
|
|
_, err := r.writeFile(unnormalized)
|
|
|
|
if err != nil {
|
|
|
|
r.canWriteUnnormalized = false
|
|
|
|
return
|
|
|
|
}
|
|
|
|
r.canWriteUnnormalized = true
|
2019-06-17 08:34:30 +00:00
|
|
|
_, err = r.f.NewObject(r.ctx, unnormalized)
|
2017-07-03 14:05:27 +00:00
|
|
|
if err == nil {
|
|
|
|
r.canReadUnnormalized = true
|
|
|
|
}
|
2019-06-17 08:34:30 +00:00
|
|
|
_, err = r.f.NewObject(r.ctx, normalized)
|
2017-07-03 14:05:27 +00:00
|
|
|
if err == nil {
|
|
|
|
r.canReadRenormalized = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-14 15:49:55 +00:00
|
|
|
// checkStringPositions writes test files with the string s (keyed by k)
// at the left, middle and right of a file name, and records for each
// position whether the file could be written and read back.
func (r *results) checkStringPositions(k, s string) {
	fs.Infof(r.f, "Writing position file 0x%0X", s)
	positionError := internal.PositionNone

	res := internal.ControlResult{
		Text:       s,
		WriteError: make(map[internal.Position]string, 3),
		GetError:   make(map[internal.Position]string, 3),
		InList:     make(map[internal.Position]internal.Presence, 3),
	}

	for _, pos := range internal.PositionList {
		// Build a file name embedding s at the required position along
		// with its hex encoding, so the name can be decoded later from
		// a listing (see positionLeftRe etc. and checkControlsList).
		path := ""
		switch pos {
		case internal.PositionMiddle:
			path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
		case internal.PositionLeft:
			path = fmt.Sprintf("%s-position-left-%0X", s, s)
		case internal.PositionRight:
			path = fmt.Sprintf("position-right-%0X-%s", s, s)
		default:
			panic("invalid position: " + pos.String())
		}
		_, writeError := r.writeFile(path)
		if writeError != nil {
			res.WriteError[pos] = writeError.Error()
			fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeError)
		} else {
			fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
		}
		obj, getErr := r.f.NewObject(r.ctx, path)
		if getErr != nil {
			res.GetError[pos] = getErr.Error()
			fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
		} else {
			// writeFile always writes 50 bytes, so any other size means
			// the file was corrupted or replaced.
			if obj.Size() != 50 {
				res.GetError[pos] = fmt.Sprintf("invalid size %d", obj.Size())
				fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
			} else {
				fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
			}
		}
		if writeError != nil || getErr != nil {
			// Accumulate failing positions (Position values combine
			// additively - presumably flag-like; see internal package).
			positionError += pos
		}
	}

	// Called from many goroutines (see checkControls) so the shared maps
	// must be updated under the lock.
	r.mu.Lock()
	r.stringNeedsEscaping[k] = positionError
	r.controlResults[k] = res
	r.mu.Unlock()
}
|
|
|
|
|
|
|
|
// check we can write a file with the control chars
|
|
|
|
func (r *results) checkControls() {
|
|
|
|
fs.Infof(r.f, "Trying to create control character file names")
|
|
|
|
// Concurrency control
|
|
|
|
tokens := make(chan struct{}, fs.Config.Checkers)
|
|
|
|
for i := 0; i < fs.Config.Checkers; i++ {
|
|
|
|
tokens <- struct{}{}
|
|
|
|
}
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := rune(0); i < 128; i++ {
|
2018-11-02 12:12:09 +00:00
|
|
|
s := string(i)
|
2017-07-03 14:05:27 +00:00
|
|
|
if i == 0 || i == '/' {
|
|
|
|
// We're not even going to check NULL or /
|
2019-05-14 15:49:55 +00:00
|
|
|
r.stringNeedsEscaping[s] = internal.PositionAll
|
2017-07-03 14:05:27 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
wg.Add(1)
|
2018-11-02 12:12:09 +00:00
|
|
|
go func(s string) {
|
2017-07-03 14:05:27 +00:00
|
|
|
defer wg.Done()
|
|
|
|
token := <-tokens
|
2019-05-14 15:49:55 +00:00
|
|
|
k := s
|
|
|
|
r.checkStringPositions(k, s)
|
2017-07-03 14:05:27 +00:00
|
|
|
tokens <- token
|
2018-11-02 12:12:09 +00:00
|
|
|
}(s)
|
|
|
|
}
|
2019-05-14 15:49:55 +00:00
|
|
|
for _, s := range []string{"\", "\u00A0", "\xBF", "\xFE"} {
|
2018-11-02 12:12:09 +00:00
|
|
|
wg.Add(1)
|
|
|
|
go func(s string) {
|
|
|
|
defer wg.Done()
|
|
|
|
token := <-tokens
|
2019-05-14 15:49:55 +00:00
|
|
|
k := s
|
|
|
|
r.checkStringPositions(k, s)
|
2018-11-02 12:12:09 +00:00
|
|
|
tokens <- token
|
|
|
|
}(s)
|
2017-07-03 14:05:27 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
2019-05-14 15:49:55 +00:00
|
|
|
r.checkControlsList()
|
2017-07-03 14:05:27 +00:00
|
|
|
fs.Infof(r.f, "Done trying to create control character file names")
|
|
|
|
}
|
|
|
|
|
2019-05-14 15:49:55 +00:00
|
|
|
func (r *results) checkControlsList() {
|
|
|
|
l, err := r.f.List(context.TODO(), "")
|
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(r.f, "Listing control character file names failed: %s", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
namesMap := make(map[string]struct{}, len(l))
|
|
|
|
for _, s := range l {
|
|
|
|
namesMap[path.Base(s.Remote())] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
for path := range namesMap {
|
|
|
|
var pos internal.Position
|
|
|
|
var hex, value string
|
|
|
|
if g := positionLeftRe.FindStringSubmatch(path); g != nil {
|
|
|
|
pos, hex, value = internal.PositionLeft, g[2], g[1]
|
|
|
|
} else if g := positionMiddleRe.FindStringSubmatch(path); g != nil {
|
|
|
|
pos, hex, value = internal.PositionMiddle, g[1], g[2]
|
|
|
|
} else if g := positionRightRe.FindStringSubmatch(path); g != nil {
|
|
|
|
pos, hex, value = internal.PositionRight, g[1], g[2]
|
|
|
|
} else {
|
|
|
|
fs.Infof(r.f, "Unknown path %q", path)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
var hexValue []byte
|
|
|
|
for ; len(hex) >= 2; hex = hex[2:] {
|
|
|
|
if b, err := strconv.ParseUint(hex[:2], 16, 8); err != nil {
|
|
|
|
fs.Infof(r.f, "Invalid path %q: %s", path, err)
|
|
|
|
continue
|
|
|
|
} else {
|
|
|
|
hexValue = append(hexValue, byte(b))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if hex != "" {
|
|
|
|
fs.Infof(r.f, "Invalid path %q", path)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
hexStr := string(hexValue)
|
|
|
|
k := hexStr
|
|
|
|
switch r.controlResults[k].InList[pos] {
|
|
|
|
case internal.Absent:
|
|
|
|
if hexStr == value {
|
|
|
|
r.controlResults[k].InList[pos] = internal.Present
|
|
|
|
} else {
|
|
|
|
r.controlResults[k].InList[pos] = internal.Renamed
|
|
|
|
}
|
|
|
|
case internal.Present:
|
|
|
|
r.controlResults[k].InList[pos] = internal.Multiple
|
|
|
|
case internal.Renamed:
|
|
|
|
r.controlResults[k].InList[pos] = internal.Multiple
|
|
|
|
}
|
|
|
|
delete(namesMap, path)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(namesMap) > 0 {
|
|
|
|
fs.Infof(r.f, "Found additional control character file names:")
|
|
|
|
for name := range namesMap {
|
|
|
|
fs.Infof(r.f, "%q", name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-03 14:05:27 +00:00
|
|
|
// find the max file name size we can use
|
|
|
|
func (r *results) findMaxLength() {
|
|
|
|
const maxLen = 16 * 1024
|
|
|
|
name := make([]byte, maxLen)
|
|
|
|
for i := range name {
|
|
|
|
name[i] = 'a'
|
|
|
|
}
|
|
|
|
// Find the first size of filename we can't write
|
|
|
|
i := sort.Search(len(name), func(i int) (fail bool) {
|
|
|
|
defer func() {
|
|
|
|
if err := recover(); err != nil {
|
|
|
|
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
|
|
|
|
fail = true
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
path := string(name[:i])
|
|
|
|
_, err := r.writeFile(path)
|
|
|
|
if err != nil {
|
|
|
|
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
fs.Infof(r.f, "Wrote file with name length %d", i)
|
|
|
|
return false
|
|
|
|
})
|
|
|
|
r.maxFileLength = i - 1
|
|
|
|
fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
|
|
|
|
}
|
|
|
|
|
2017-08-22 06:00:10 +00:00
|
|
|
// checkStreaming checks whether the remote can upload a file of
// indeterminate size and get it back with the correct hashes and size,
// setting r.canStream with the outcome.
func (r *results) checkStreaming() {
	putter := r.f.Put
	if r.f.Features().PutStream != nil {
		fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.")
		putter = r.f.Features().PutStream
	}

	contents := "thinking of test strings is hard"
	buf := bytes.NewBufferString(contents)
	// Hash the data as it is uploaded so we can verify it afterwards.
	hashIn := hash.NewMultiHasher()
	in := io.TeeReader(buf, hashIn)

	// A size of -1 marks the upload as having an unknown (streamed) length.
	objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
	objR, err := putter(r.ctx, in, objIn)
	if err != nil {
		fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
		r.canStream = false
		return
	}

	// Verify every hash type the destination supports against what we sent.
	hashes := hashIn.Sums()
	types := objR.Fs().Hashes().Array()
	for _, Hash := range types {
		sum, err := objR.Hash(r.ctx, Hash)
		if err != nil {
			fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err)
			r.canStream = false
			return
		}
		if !hash.Equals(hashes[Hash], sum) {
			fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", Hash, hashes[Hash], sum)
			r.canStream = false
			return
		}
	}
	// Finally check the stored size matches what we uploaded.
	if int64(len(contents)) != objR.Size() {
		fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size())
		r.canStream = false
		return
	}
	r.canStream = true
}
|
|
|
|
|
2019-06-17 08:34:30 +00:00
|
|
|
func readInfo(ctx context.Context, f fs.Fs) error {
|
|
|
|
err := f.Mkdir(ctx, "")
|
2017-07-03 14:05:27 +00:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "couldn't mkdir")
|
|
|
|
}
|
2019-06-17 08:34:30 +00:00
|
|
|
r := newResults(ctx, f)
|
2017-07-03 14:05:27 +00:00
|
|
|
if checkControl {
|
|
|
|
r.checkControls()
|
|
|
|
}
|
|
|
|
if checkLength {
|
|
|
|
r.findMaxLength()
|
|
|
|
}
|
|
|
|
if checkNormalization {
|
|
|
|
r.checkUTF8Normalization()
|
|
|
|
}
|
2017-08-22 06:00:10 +00:00
|
|
|
if checkStreaming {
|
|
|
|
r.checkStreaming()
|
|
|
|
}
|
2017-07-03 14:05:27 +00:00
|
|
|
r.Print()
|
2019-05-14 15:49:55 +00:00
|
|
|
r.WriteJSON()
|
2017-07-03 14:05:27 +00:00
|
|
|
return nil
|
|
|
|
}
|