forked from TrueCloudLab/restic

Update vendored deps (except minio-go)

parent f45abac27f
commit d6da9211bc

233 changed files with 33586 additions and 20533 deletions
vendor/manifest (48 changes, vendored)
@@ -10,13 +10,13 @@
{
"importpath": "github.com/elithrar/simple-scrypt",
"repository": "https://github.com/elithrar/simple-scrypt",
"revision": "2325946f714c95de4a6088202c402fbdfa64163b",
"revision": "6724715de445c2e70cdafb7a1a14c8cfe0984210",
"branch": "master"
},
{
"importpath": "github.com/go-ini/ini",
"repository": "https://github.com/go-ini/ini",
"revision": "e7fea39b01aea8d5671f6858f0532f56e8bff3a5",
"revision": "3d73f4b845efdf9989fffd4b4e562727744a34ba",
"branch": "master"
},
{
@@ -34,13 +34,13 @@
{
"importpath": "github.com/kurin/blazer",
"repository": "https://github.com/kurin/blazer",
"revision": "d1b9d31c8641e46f2651fe564ee9ddb857c1ed29",
"revision": "612082ed2430716569f1ec816fc6ade849020816",
"branch": "master"
},
{
"importpath": "github.com/minio/go-homedir",
"repository": "https://github.com/minio/go-homedir",
"revision": "0b1069c753c94b3633cc06a1995252dbcc27c7a6",
"revision": "21304a94172ae3a09dee2cd86a12fb6f842138c7",
"branch": "master"
},
{
@@ -52,104 +52,104 @@
{
"importpath": "github.com/ncw/swift",
"repository": "https://github.com/ncw/swift",
"revision": "bf51ccd3b5c3a1f12ac762b4511c5f9f1ce6b26f",
"revision": "9e6fdb8957a022d5780a78b58d6181c3580bb01f",
"branch": "master"
},
{
"importpath": "github.com/pkg/errors",
"repository": "https://github.com/pkg/errors",
"revision": "645ef00459ed84a119197bfb8d8205042c6df63d",
"branch": "HEAD"
"revision": "c605e284fe17294bda444b34710735b29d1a9d90",
"branch": "master"
},
{
"importpath": "github.com/pkg/profile",
"repository": "https://github.com/pkg/profile",
"revision": "1c16f117a3ab788fdf0e334e623b8bccf5679866",
"branch": "HEAD"
"revision": "5b67d428864e92711fcbd2f8629456121a56d91f",
"branch": "master"
},
{
"importpath": "github.com/pkg/sftp",
"repository": "https://github.com/pkg/sftp",
"revision": "8197a2e580736b78d704be0fc47b2324c0591a32",
"revision": "314a5ccb89b21e053d7d96c3a706eacaf2b18231",
"branch": "master"
},
{
"importpath": "github.com/pkg/xattr",
"repository": "https://github.com/pkg/xattr",
"revision": "858d49c224b241ba9393e20f521f6a76f52dd482",
"branch": "HEAD"
"revision": "2c7218aab2e9980561010ef420b53d948749deaf",
"branch": "master"
},
{
"importpath": "github.com/restic/chunker",
"repository": "https://github.com/restic/chunker",
"revision": "bb2ecf9a98e35a0b336ffc23fc515fb6e7961577",
"branch": "HEAD"
"revision": "1542d55ca53d2d8d7b38e890f7a4be90014356af",
"branch": "master"
},
{
"importpath": "github.com/spf13/cobra",
"repository": "https://github.com/spf13/cobra",
"revision": "10f6b9d7e1631a54ad07c5c0fb71c28a1abfd3c2",
"revision": "d994347edadc56d6a7f863775fb6887606685ae6",
"branch": "master"
},
{
"importpath": "github.com/spf13/pflag",
"repository": "https://github.com/spf13/pflag",
"revision": "2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51",
"revision": "e57e3eeb33f795204c1ca35f56c44f83227c6e66",
"branch": "master"
},
{
"importpath": "golang.org/x/crypto/curve25519",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/curve25519"
},
{
"importpath": "golang.org/x/crypto/ed25519",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/ed25519"
},
{
"importpath": "golang.org/x/crypto/pbkdf2",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/pbkdf2"
},
{
"importpath": "golang.org/x/crypto/poly1305",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/poly1305"
},
{
"importpath": "golang.org/x/crypto/scrypt",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/scrypt"
},
{
"importpath": "golang.org/x/crypto/ssh",
"repository": "https://go.googlesource.com/crypto",
"revision": "efac7f277b17c19894091e358c6130cb6bd51117",
"revision": "7f7c0c2d75ebb4e32a21396ce36e87b6dadc91c9",
"branch": "master",
"path": "/ssh"
},
{
"importpath": "golang.org/x/net/context",
"repository": "https://go.googlesource.com/net",
"revision": "5602c733f70afc6dcec6766be0d5034d4c4f14de",
"revision": "b3756b4b77d7b13260a0a2ec658753cf48922eac",
"branch": "master",
"path": "/context"
},
{
"importpath": "golang.org/x/sys/unix",
"repository": "https://go.googlesource.com/sys",
"revision": "f3918c30c5c2cb527c0b071a27c35120a6c0719a",
"revision": "4cd6d1a821c7175768725b55ca82f14683a29ea4",
"branch": "master",
"path": "/unix"
}
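Each manifest entry pins one dependency to a repository, revision, and branch; the golang.org/x entries also carry a "path" for the vendored sub-package. As a rough illustration (not part of this commit, and assuming the usual gb-vendor layout with a top-level "dependencies" array), the manifest can be read with nothing but the standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Dependency mirrors one entry of vendor/manifest as shown above; "path" is
// only set for sub-packages of a larger repository.
type Dependency struct {
	Importpath string `json:"importpath"`
	Repository string `json:"repository"`
	Revision   string `json:"revision"`
	Branch     string `json:"branch"`
	Path       string `json:"path,omitempty"`
}

type Manifest struct {
	Dependencies []Dependency `json:"dependencies"`
}

func main() {
	f, err := os.Open("vendor/manifest")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var m Manifest
	if err := json.NewDecoder(f).Decode(&m); err != nil {
		panic(err)
	}
	for _, d := range m.Dependencies {
		fmt.Printf("%s @ %s (%s)\n", d.Importpath, d.Revision, d.Branch)
	}
}
```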
vendor/src/github.com/elithrar/simple-scrypt/compositor.json (new file, 80 lines, vendored)
File diff suppressed because one or more lines are too long
vendor/src/github.com/go-ini/ini/README.md (10 changes, vendored)
@@ -83,8 +83,8 @@ sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")

// key1 and key2 are the exactly same key object
key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
key1, err := sec1.GetKey("Key")
key2, err := sec2.GetKey("KeY")
```

#### MySQL-like boolean key

@@ -122,6 +122,12 @@ Take care that following format will be treated as comment:

If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.

Alternatively, you can use following `LoadOptions` to completely ignore inline comments:

```go
cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini"))
```

### Working with sections

To get a section, you would need to:
vendor/src/github.com/go-ini/ini/README_ZH.md (10 changes, vendored)
@@ -76,8 +76,8 @@ sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")

// key1 and key2 point to the same key object
key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
key1, err := sec1.GetKey("Key")
key2, err := sec2.GetKey("KeY")
```

#### MySQL-like boolean keys

@@ -115,6 +115,12 @@ key, err := sec.NewBooleanKey("skip-host-cache")

If you want to use a value containing `#` or `;`, wrap it with ``` ` ``` or ``` """ ```.

Alternatively, you can use the following `LoadOptions` to ignore inline comments entirely:

```go
cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini"))
```

### Working with sections (Section)

To get a section:
vendor/src/github.com/go-ini/ini/ini.go (9 changes, vendored)
@@ -37,7 +37,7 @@ const (

// Maximum allowed depth when recursively substituing variable names.
_DEPTH_VALUES = 99
_VERSION = "1.27.0"
_VERSION = "1.28.1"
)

// Version returns current package version literal.

@@ -60,6 +60,9 @@ var (

// Explicitly write DEFAULT section header
DefaultHeader = false

// Indicate whether to put a line between sections
PrettySection = true
)

func init() {

@@ -504,7 +507,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {

@@ -513,11 +516,13 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
}

if PrettySection {
// Put a line between sections
if _, err = buf.WriteString(LineBreak); err != nil {
return 0, err
}
}
}

return buf.WriteTo(w)
}
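The new `IgnoreInlineComment` load option also changes the write path patched above: without it, a value containing `#` or `;` is wrapped in backticks so it round-trips; with it, the value is written verbatim. A minimal sketch (not part of this commit; the key name and value are made up):

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

func main() {
	// Default behaviour: '#' would start an inline comment, so the value is
	// quoted with backticks on write.
	cfg := ini.Empty()
	cfg.Section("").Key("path").SetValue("/tmp/dir #1")
	cfg.WriteTo(os.Stdout) // path = `/tmp/dir #1`

	// With IgnoreInlineComment, '#' is not special and the value is written as-is.
	loose, _ := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, []byte{})
	loose.Section("").Key("path").SetValue("/tmp/dir #1")
	loose.WriteTo(os.Stdout) // path = /tmp/dir #1
}
```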
vendor/src/github.com/go-ini/ini/ini_test.go (7 changes, vendored)
@@ -215,6 +215,13 @@ key2=value #comment2`))

So(cfg.Section("").Key("key1").String(), ShouldEqual, `value ;comment`)
So(cfg.Section("").Key("key2").String(), ShouldEqual, `value #comment2`)

var buf bytes.Buffer
cfg.WriteTo(&buf)
So(buf.String(), ShouldEqual, `key1 = value ;comment
key2 = value #comment2

`)
})

Convey("Load with boolean type keys", t, func() {
vendor/src/github.com/go-ini/ini/parser.go (2 changes, vendored)
@@ -189,7 +189,7 @@ func (p *parser) readContinuationLines(val string) (string, error) {
// are quotes \" or \'.
// It returns false if any other parts also contain same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
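The quote-detection change above is a boundary fix: with `len(in) > 2`, a value consisting of nothing but two quote characters was not recognised as quoted. A worked illustration (not part of the commit):

```go
// in = `""`, quote = '"'
// old: len(in) > 2                → false, so the pair of quotes was not stripped
// new: len(in) >= 2               → true
//      in[0] == in[len(in)-1]     → true
//      IndexByte(in[1:], quote) == 0 == len(in)-2 → true
// An empty quoted value is now treated as surrounded by quotes.
```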
vendor/src/github.com/go-ini/ini/struct.go (92 changes, vendored)
@@ -78,7 +78,7 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()

// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)

@@ -92,26 +92,30 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh
}

var vals interface{}
var err error

sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals, _ = key.parseInts(strs, true, false)
vals, err = key.parseInts(strs, true, false)
case reflect.Int64:
vals, _ = key.parseInt64s(strs, true, false)
vals, err = key.parseInt64s(strs, true, false)
case reflect.Uint:
vals, _ = key.parseUints(strs, true, false)
vals, err = key.parseUints(strs, true, false)
case reflect.Uint64:
vals, _ = key.parseUint64s(strs, true, false)
vals, err = key.parseUint64s(strs, true, false)
case reflect.Float64:
vals, _ = key.parseFloat64s(strs, true, false)
vals, err = key.parseFloat64s(strs, true, false)
case reflectTime:
vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if isStrict {
return err
}

slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {

@@ -136,10 +140,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh
return nil
}

func wrapStrictError(err error, isStrict bool) error {
if isStrict {
return err
}
return nil
}

// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use default value that is already assigned to strcut.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {

@@ -149,7 +160,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return nil
return wrapStrictError(err, isStrict)
}
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:

@@ -161,8 +172,8 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}

intVal, err := key.Int64()
if err != nil || intVal == 0 {
return nil
if err != nil {
return wrapStrictError(err, isStrict)
}
field.SetInt(intVal)
// byte is an alias for uint8, so supporting uint8 breaks support for byte

@@ -176,24 +187,24 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri

uintVal, err := key.Uint64()
if err != nil {
return nil
return wrapStrictError(err, isStrict)
}
field.SetUint(uintVal)

case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
return wrapStrictError(err, isStrict)
}
field.SetFloat(floatVal)
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return nil
return wrapStrictError(err, isStrict)
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow)
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
return fmt.Errorf("unsupported type '%s'", t)
}

@@ -212,7 +223,7 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo
return rawName, omitEmpty, allowShadow
}

func (s *Section) mapTo(val reflect.Value) error {
func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}

@@ -241,7 +252,7 @@ func (s *Section) mapTo(val reflect.Value) error {

if isAnonymous || isStruct {
if sec, err := s.f.GetSection(fieldName); err == nil {
if err = sec.mapTo(field); err != nil {
if err = sec.mapTo(field, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
continue

@@ -250,7 +261,7 @@ func (s *Section) mapTo(val reflect.Value) error {

if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}

@@ -269,7 +280,22 @@ func (s *Section) MapTo(v interface{}) error {
return errors.New("cannot map to non-pointer struct")
}

return s.mapTo(val)
return s.mapTo(val, false)
}

// MapTo maps section to given struct in strict mode,
// which returns all possible error including value parsing error.
func (s *Section) StrictMapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}

return s.mapTo(val, true)
}

// MapTo maps file to given struct.

@@ -277,6 +303,12 @@ func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}

// MapTo maps file to given struct in strict mode,
// which returns all possible error including value parsing error.
func (f *File) StrictMapTo(v interface{}) error {
return f.Section("").StrictMapTo(v)
}

// MapTo maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)

@@ -287,11 +319,28 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other
return cfg.MapTo(v)
}

// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
// which returns all possible error including value parsing error.
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.StrictMapTo(v)
}

// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}

// StrictMapTo maps data sources to given struct in strict mode,
// which returns all possible error including value parsing error.
func StrictMapTo(v, source interface{}, others ...interface{}) error {
return StrictMapToWithMapper(v, nil, source, others...)
}

// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
slice := field.Slice(0, field.Len())

@@ -359,10 +408,11 @@ func isEmptyValue(v reflect.Value) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflectTime:
return v.Interface().(time.Time).IsZero()
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflectTime:
t, ok := v.Interface().(time.Time)
return ok && t.IsZero()
}
return false
}
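`StrictMapTo` and its package-level variants added above behave like `MapTo`, except that value-parsing errors are returned instead of being swallowed (with plain `MapTo` the field simply keeps its current value). A minimal sketch (not part of this commit; the struct and input mirror the new test below):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

type Person struct {
	Name string `ini:"name"`
	Age  int    `ini:"age"`
}

func main() {
	cfg, err := ini.Load([]byte("name=bruce\nage=a30"))
	if err != nil {
		panic(err)
	}

	var p Person
	// MapTo ignores the unparsable "a30" and leaves Age at its zero value.
	fmt.Println(cfg.MapTo(&p)) // <nil>

	// StrictMapTo surfaces the parse error instead.
	fmt.Println(cfg.StrictMapTo(&p)) // error mapping field(age): ...
}
```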
vendor/src/github.com/go-ini/ini/struct_test.go (15 changes, vendored)
@@ -229,6 +229,21 @@ func Test_Struct(t *testing.T) {
})
})

Convey("Map to struct in strict mode", t, func() {
cfg, err := Load([]byte(`
name=bruce
age=a30`))
So(err, ShouldBeNil)

type Strict struct {
Name string `ini:"name"`
Age int `ini:"age"`
}
s := new(Strict)

So(cfg.Section("").StrictMapTo(s), ShouldNotBeNil)
})

Convey("Reflect from struct", t, func() {
type Embeded struct {
Dates []time.Time `delim:"|"`
vendor/src/github.com/kurin/blazer/README.md (5 changes, vendored)
@@ -77,10 +77,7 @@ Downloading is as simple as uploading:

```go
func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, dst string) error {
r, err := bucket.Object(src).NewReader(ctx)
if err != nil {
return err
}
r := bucket.Object(src).NewReader(ctx)
defer r.Close()

f, err := file.Create(dst)
vendor/src/github.com/kurin/blazer/b2/b2.go (16 changes, vendored)
@@ -332,11 +332,9 @@ func (o *Object) Name() string {

// Attrs returns an object's attributes.
func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
f, err := o.b.b.downloadFileByName(ctx, o.name, 0, 1)
if err != nil {
if err := o.ensure(ctx); err != nil {
return nil, err
}
o.f = o.b.b.file(f.id())
fi, err := o.f.getFileInfo(ctx)
if err != nil {
return nil, err

@@ -585,20 +583,14 @@ func (b *Bucket) Reveal(ctx context.Context, name string) error {
}

func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
fs, _, err := b.b.listFileNames(ctx, 1, name, "", "")
fr, err := b.b.downloadFileByName(ctx, name, 0, 1)
if err != nil {
return nil, err
}
if len(fs) < 1 {
return nil, b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
}
f := fs[0]
if f.name() != name {
return nil, b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
}
fr.Close()
return &Object{
name: name,
f: f,
f: b.b.file(fr.id(), name),
b: b,
}, nil
}
vendor/src/github.com/kurin/blazer/b2/b2_test.go (25 changes, vendored)
@@ -32,10 +32,12 @@ import (

const (
bucketName = "b2-tests"
smallFileName = "TeenyTiny"
smallFileName = "Teeny Tiny"
largeFileName = "BigBytes"
)

var gmux = &sync.Mutex{}

type testError struct {
retry bool
backoff time.Duration

@@ -167,6 +169,8 @@ func (t *testBucket) startLargeFile(_ context.Context, name, _ string, _ map[str

func (t *testBucket) listFileNames(ctx context.Context, count int, cont, pfx, del string) ([]b2FileInterface, string, error) {
var f []string
gmux.Lock()
defer gmux.Unlock()
for name := range t.files {
f = append(f, name)
}

@@ -196,6 +200,8 @@ func (t *testBucket) listFileVersions(ctx context.Context, count int, a, b, c, d
}

func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64) (b2FileReaderInterface, error) {
gmux.Lock()
defer gmux.Unlock()
f := t.files[name]
end := int(offset + size)
if end >= len(f) {

@@ -216,7 +222,7 @@ func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Dura
return "", nil
}
func (t *testBucket) baseURL() string { return "" }
func (t *testBucket) file(id string) b2FileInterface { return nil }
func (t *testBucket) file(id, name string) b2FileInterface { return nil }

type testURL struct {
files map[string]string

@@ -229,6 +235,8 @@ func (t *testURL) uploadFile(_ context.Context, r io.Reader, _ int, name, _, _ s
if _, err := io.Copy(buf, r); err != nil {
return nil, err
}
gmux.Lock()
defer gmux.Unlock()
t.files[name] = buf.String()
return &testFile{
n: name,

@@ -239,7 +247,6 @@ func (t *testURL) uploadFile(_ context.Context, r io.Reader, _ int, name, _, _ s

type testLargeFile struct {
name string
mux sync.Mutex
parts map[int][]byte
files map[string]string
errs *errCont

@@ -247,6 +254,8 @@ type testLargeFile struct {

func (t *testLargeFile) finishLargeFile(context.Context) (b2FileInterface, error) {
var total []byte
gmux.Lock()
defer gmux.Unlock()
for i := 1; i <= len(t.parts); i++ {
total = append(total, t.parts[i]...)
}

@@ -259,15 +268,15 @@ func (t *testLargeFile) finishLargeFile(context.Context) (b2FileInterface, error
}

func (t *testLargeFile) getUploadPartURL(context.Context) (b2FileChunkInterface, error) {
gmux.Lock()
defer gmux.Unlock()
return &testFileChunk{
parts: t.parts,
mux: &t.mux,
errs: t.errs,
}, nil
}

type testFileChunk struct {
mux *sync.Mutex
parts map[int][]byte
errs *errCont
}

@@ -283,9 +292,9 @@ func (t *testFileChunk) uploadPart(_ context.Context, r io.Reader, _ string, _,
if err != nil {
return int(i), err
}
t.mux.Lock()
gmux.Lock()
defer gmux.Unlock()
t.parts[index] = buf.Bytes()
t.mux.Unlock()
return int(i), nil
}

@@ -315,6 +324,8 @@ func (t *testFile) listParts(context.Context, int, int) ([]b2FilePartInterface,
}

func (t *testFile) deleteFileVersion(context.Context) error {
gmux.Lock()
defer gmux.Unlock()
delete(t.files, t.n)
return nil
}
@@ -54,7 +54,7 @@ type beBucketInterface interface {
hideFile(context.Context, string) (beFileInterface, error)
getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
baseURL() string
file(string) beFileInterface
file(string, string) beFileInterface
}

type beBucket struct {

@@ -407,9 +407,9 @@ func (b *beBucket) baseURL() string {
return b.b2bucket.baseURL()
}

func (b *beBucket) file(id string) beFileInterface {
func (b *beBucket) file(id, name string) beFileInterface {
return &beFile{
b2file: b.b2bucket.file(id),
b2file: b.b2bucket.file(id, name),
ri: b.ri,
}
}
@@ -51,7 +51,7 @@ type b2BucketInterface interface {
hideFile(context.Context, string) (b2FileInterface, error)
getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
baseURL() string
file(string) b2FileInterface
file(string, string) b2FileInterface
}

type b2URLInterface interface {

@@ -315,8 +315,12 @@ func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, ne
func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) {
fr, err := b.b.DownloadFileByName(ctx, name, offset, size)
if err != nil {
if code, _ := base.Code(err); code == http.StatusRequestedRangeNotSatisfiable {
code, _ := base.Code(err)
switch code {
case http.StatusRequestedRangeNotSatisfiable:
return nil, errNoMoreContent
case http.StatusNotFound:
return nil, b2err{err: err, notFoundErr: true}
}
return nil, err
}

@@ -339,7 +343,7 @@ func (b *b2Bucket) baseURL() string {
return b.b.BaseURL()
}

func (b *b2Bucket) file(id string) b2FileInterface { return &b2File{b.b.File(id)} }
func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} }

func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) {
file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info)

@@ -374,6 +378,9 @@ func (b *b2File) status() string {
}

func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) {
if b.b.Info != nil {
return &b2FileInfo{b.b.Info}, nil
}
fi, err := b.b.GetFileInfo(ctx)
if err != nil {
return nil, err
@@ -22,6 +22,7 @@ import (
"net/http"
"os"
"reflect"
"sync"
"testing"
"time"

@@ -219,6 +220,14 @@ func TestAttrs(t *testing.T) {
LastModified: time.Unix(1464370149, 142000000),
Info: map[string]string{}, // can't be nil
},
&Attrs{
ContentType: "arbitrarystring",
Info: map[string]string{
"spaces": "string with spaces",
"unicode": "日本語",
"special": "&/!@_.~",
},
},
}

table := []struct {

@@ -615,6 +624,105 @@ func TestDuelingBuckets(t *testing.T) {
}
}

func TestNotExist(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
defer cancel()

bucket, done := startLiveTest(ctx, t)
defer done()

if _, err := bucket.Object("not there").Attrs(ctx); !IsNotExist(err) {
t.Errorf("IsNotExist() on nonexistent object returned false (%v)", err)
}
}

func TestWriteEmpty(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
defer cancel()

bucket, done := startLiveTest(ctx, t)
defer done()

_, _, err := writeFile(ctx, bucket, smallFileName, 0, 1e8)
if err != nil {
t.Fatal(err)
}
}

type rtCounter struct {
rt http.RoundTripper
trips int
sync.Mutex
}

func (rt *rtCounter) RoundTrip(r *http.Request) (*http.Response, error) {
rt.Lock()
defer rt.Unlock()
rt.trips++
return rt.rt.RoundTrip(r)
}

func TestAttrsNoRoundtrip(t *testing.T) {
rt := &rtCounter{rt: transport}
transport = rt
defer func() {
transport = rt.rt
}()

ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
defer cancel()

bucket, done := startLiveTest(ctx, t)
defer done()

_, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
if err != nil {
t.Fatal(err)
}

objs, _, err := bucket.ListObjects(ctx, 1, nil)
if err != nil {
t.Fatal(err)
}
if len(objs) != 1 {
t.Fatal("unexpected objects: got %d, want 1", len(objs))
}

trips := rt.trips
attrs, err := objs[0].Attrs(ctx)
if err != nil {
t.Fatal(err)
}
if attrs.Name != smallFileName {
t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName)
}

if trips != rt.trips {
t.Errorf("Attrs() should not have caused any net traffic, but it did: old %d, new %d", trips, rt.trips)
}
}

func TestDeleteWithoutName(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
defer cancel()

bucket, done := startLiveTest(ctx, t)
defer done()

_, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8)
if err != nil {
t.Fatal(err)
}

if err := bucket.Object(smallFileName).Delete(ctx); err != nil {
t.Fatal(err)
}
}

type object struct {
o *Object
err error

@@ -655,6 +763,8 @@ func listObjects(ctx context.Context, f func(context.Context, int, *Cursor) ([]*
return ch
}

var transport = http.DefaultTransport

func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) {
id := os.Getenv(apiID)
key := os.Getenv(apiKey)

@@ -662,7 +772,7 @@ func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) {
t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
return nil, nil
}
client, err := NewClient(ctx, id, key, FailSomeUploads(), ExpireSomeAuthTokens())
client, err := NewClient(ctx, id, key, FailSomeUploads(), ExpireSomeAuthTokens(), Transport(transport))
if err != nil {
t.Fatal(err)
return nil, nil
vendor/src/github.com/kurin/blazer/b2/writer.go (14 changes, vendored)
@@ -74,6 +74,7 @@ type Writer struct {
done sync.Once
file beLargeFileInterface
seen map[int]string
everStarted bool

o *Object
name string

@@ -202,6 +203,7 @@ func (w *Writer) thread() {
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (int, error) {
w.start.Do(func() {
w.everStarted = true
w.smux.Lock()
w.smap = make(map[int]*meteredReader)
w.smux.Unlock()

@@ -362,6 +364,9 @@ func (w *Writer) sendChunk() error {
// value of Close on all writers.
func (w *Writer) Close() error {
w.done.Do(func() {
if !w.everStarted {
return
}
defer w.o.b.c.removeWriter(w)
defer w.w.Close() // TODO: log error
if w.cidx == 0 {

@@ -419,16 +424,21 @@ type meteredReader struct {
read int64
size int
r io.ReadSeeker
mux sync.Mutex
}

func (mr *meteredReader) Read(p []byte) (int, error) {
mr.mux.Lock()
defer mr.mux.Unlock()
n, err := mr.r.Read(p)
atomic.AddInt64(&mr.read, int64(n))
mr.read += int64(n)
return n, err
}

func (mr *meteredReader) Seek(offset int64, whence int) (int64, error) {
atomic.StoreInt64(&mr.read, offset)
mr.mux.Lock()
defer mr.mux.Unlock()
mr.read = offset
return mr.r.Seek(offset, whence)
}
vendor/src/github.com/kurin/blazer/base/base.go (62 changes, vendored)
@@ -277,7 +277,7 @@ func (rb *requestBody) getBody() io.Reader {

var reqID int64

func (o *b2Options) makeRequest(ctx context.Context, method, verb, url string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error {
func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error {
var args []byte
if b2req != nil {
enc, err := json.Marshal(b2req)

@@ -290,12 +290,15 @@ func (o *b2Options) makeRequest(ctx context.Context, method, verb, url string, b
size: int64(len(enc)),
}
}
req, err := http.NewRequest(verb, url, body.getBody())
req, err := http.NewRequest(verb, uri, body.getBody())
if err != nil {
return err
}
req.ContentLength = body.getSize()
for k, v := range headers {
if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") {
v = escape(v)
}
req.Header.Set(k, v)
}
req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1)))

@@ -322,6 +325,7 @@ func (o *b2Options) makeRequest(ctx context.Context, method, verb, url string, b
}
if reply.err != nil {
// Connection errors are retryable.
blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, reply.err)
return b2err{
msg: reply.err.Error(),
retry: 1,

@@ -613,20 +617,21 @@ type File struct {
Size int64
Status string
Timestamp time.Time
Info *FileInfo
id string
b2 *B2
}

// File returns a bare File struct, but with the appropriate id and b2
// interfaces.
func (b *Bucket) File(id string) *File {
return &File{id: id, b2: b.b2}
func (b *Bucket) File(id, name string) *File {
return &File{id: id, b2: b.b2, Name: name}
}

// UploadFile wraps b2_upload_file.
func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) {
func (u *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) {
headers := map[string]string{
"Authorization": url.token,
"Authorization": u.token,
"X-Bz-File-Name": name,
"Content-Type": contentType,
"Content-Length": fmt.Sprintf("%d", size),

@@ -636,7 +641,7 @@ func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, con
headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v
}
b2resp := &b2types.UploadFileResponse{}
if err := url.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", url.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil {
if err := u.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", u.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil {
return nil, err
}
return &File{

@@ -645,7 +650,7 @@ func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, con
Timestamp: millitime(b2resp.Timestamp),
Status: b2resp.Action,
id: b2resp.FileID,
b2: url.b2,
b2: u.b2,
}, nil
}

@@ -868,6 +873,15 @@ func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, pre
Size: f.Size,
Status: f.Action,
Timestamp: millitime(f.Timestamp),
Info: &FileInfo{
Name: f.Name,
SHA1: f.SHA1,
Size: f.Size,
ContentType: f.ContentType,
Info: f.Info,
Status: f.Action,
Timestamp: millitime(f.Timestamp),
},
id: f.FileID,
b2: b.b2,
})

@@ -899,6 +913,15 @@ func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, sta
Size: f.Size,
Status: f.Action,
Timestamp: millitime(f.Timestamp),
Info: &FileInfo{
Name: f.Name,
SHA1: f.SHA1,
Size: f.Size,
ContentType: f.ContentType,
Info: f.Info,
Status: f.Action,
Timestamp: millitime(f.Timestamp),
},
id: f.FileID,
b2: b.b2,
})

@@ -945,8 +968,8 @@ func mkRange(offset, size int64) string {

// DownloadFileByName wraps b2_download_file_by_name.
func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) {
url := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, name)
req, err := http.NewRequest("GET", url, nil)
uri := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, name)
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return nil, err
}

@@ -978,6 +1001,7 @@ func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, si
}
clen, err := strconv.ParseInt(reply.resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
reply.resp.Body.Close()
return nil, err
}
info := make(map[string]string)

@@ -985,8 +1009,17 @@ func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, si
if !strings.HasPrefix(key, "X-Bz-Info-") {
continue
}
name := strings.TrimPrefix(key, "X-Bz-Info-")
info[name] = reply.resp.Header.Get(key)
name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-"))
if err != nil {
reply.resp.Body.Close()
return nil, err
}
val, err := unescape(reply.resp.Header.Get(key))
if err != nil {
reply.resp.Body.Close()
return nil, err
}
info[name] = val
}
return &FileReader{
ReadCloser: reply.resp.Body,

@@ -1046,7 +1079,7 @@ func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) {
f.Status = b2resp.Action
f.Name = b2resp.Name
f.Timestamp = millitime(b2resp.Timestamp)
return &FileInfo{
f.Info = &FileInfo{
Name: b2resp.Name,
SHA1: b2resp.SHA1,
Size: b2resp.Size,

@@ -1054,5 +1087,6 @@ func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) {
Info: b2resp.Info,
Status: b2resp.Action,
Timestamp: millitime(b2resp.Timestamp),
}, nil
}
return f.Info, nil
}
@@ -17,10 +17,12 @@ package base
import (
"bytes"
"crypto/sha1"
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"strings"
"testing"
"time"

@@ -277,3 +279,140 @@ func compareFileAndInfo(t *testing.T, info *FileInfo, name, sha1 string, imap ma
t.Errorf("got %v, want %v", info.Info, imap)
}
}

// from https://www.backblaze.com/b2/docs/string_encoding.html
var testCases = `[
{"fullyEncoded": "%20", "minimallyEncoded": "+", "string": " "},
{"fullyEncoded": "%21", "minimallyEncoded": "!", "string": "!"},
{"fullyEncoded": "%22", "minimallyEncoded": "%22", "string": "\""},
{"fullyEncoded": "%23", "minimallyEncoded": "%23", "string": "#"},
{"fullyEncoded": "%24", "minimallyEncoded": "$", "string": "$"},
{"fullyEncoded": "%25", "minimallyEncoded": "%25", "string": "%"},
{"fullyEncoded": "%26", "minimallyEncoded": "%26", "string": "&"},
{"fullyEncoded": "%27", "minimallyEncoded": "'", "string": "'"},
{"fullyEncoded": "%28", "minimallyEncoded": "(", "string": "("},
{"fullyEncoded": "%29", "minimallyEncoded": ")", "string": ")"},
{"fullyEncoded": "%2A", "minimallyEncoded": "*", "string": "*"},
{"fullyEncoded": "%2B", "minimallyEncoded": "%2B", "string": "+"},
{"fullyEncoded": "%2C", "minimallyEncoded": "%2C", "string": ","},
{"fullyEncoded": "%2D", "minimallyEncoded": "-", "string": "-"},
{"fullyEncoded": "%2E", "minimallyEncoded": ".", "string": "."},
{"fullyEncoded": "/", "minimallyEncoded": "/", "string": "/"},
{"fullyEncoded": "%30", "minimallyEncoded": "0", "string": "0"},
{"fullyEncoded": "%31", "minimallyEncoded": "1", "string": "1"},
{"fullyEncoded": "%32", "minimallyEncoded": "2", "string": "2"},
{"fullyEncoded": "%33", "minimallyEncoded": "3", "string": "3"},
{"fullyEncoded": "%34", "minimallyEncoded": "4", "string": "4"},
{"fullyEncoded": "%35", "minimallyEncoded": "5", "string": "5"},
{"fullyEncoded": "%36", "minimallyEncoded": "6", "string": "6"},
{"fullyEncoded": "%37", "minimallyEncoded": "7", "string": "7"},
{"fullyEncoded": "%38", "minimallyEncoded": "8", "string": "8"},
{"fullyEncoded": "%39", "minimallyEncoded": "9", "string": "9"},
{"fullyEncoded": "%3A", "minimallyEncoded": ":", "string": ":"},
{"fullyEncoded": "%3B", "minimallyEncoded": ";", "string": ";"},
{"fullyEncoded": "%3C", "minimallyEncoded": "%3C", "string": "<"},
{"fullyEncoded": "%3D", "minimallyEncoded": "=", "string": "="},
{"fullyEncoded": "%3E", "minimallyEncoded": "%3E", "string": ">"},
{"fullyEncoded": "%3F", "minimallyEncoded": "%3F", "string": "?"},
{"fullyEncoded": "%40", "minimallyEncoded": "@", "string": "@"},
{"fullyEncoded": "%41", "minimallyEncoded": "A", "string": "A"},
{"fullyEncoded": "%42", "minimallyEncoded": "B", "string": "B"},
{"fullyEncoded": "%43", "minimallyEncoded": "C", "string": "C"},
{"fullyEncoded": "%44", "minimallyEncoded": "D", "string": "D"},
{"fullyEncoded": "%45", "minimallyEncoded": "E", "string": "E"},
{"fullyEncoded": "%46", "minimallyEncoded": "F", "string": "F"},
{"fullyEncoded": "%47", "minimallyEncoded": "G", "string": "G"},
{"fullyEncoded": "%48", "minimallyEncoded": "H", "string": "H"},
{"fullyEncoded": "%49", "minimallyEncoded": "I", "string": "I"},
{"fullyEncoded": "%4A", "minimallyEncoded": "J", "string": "J"},
{"fullyEncoded": "%4B", "minimallyEncoded": "K", "string": "K"},
{"fullyEncoded": "%4C", "minimallyEncoded": "L", "string": "L"},
{"fullyEncoded": "%4D", "minimallyEncoded": "M", "string": "M"},
{"fullyEncoded": "%4E", "minimallyEncoded": "N", "string": "N"},
{"fullyEncoded": "%4F", "minimallyEncoded": "O", "string": "O"},
{"fullyEncoded": "%50", "minimallyEncoded": "P", "string": "P"},
{"fullyEncoded": "%51", "minimallyEncoded": "Q", "string": "Q"},
{"fullyEncoded": "%52", "minimallyEncoded": "R", "string": "R"},
{"fullyEncoded": "%53", "minimallyEncoded": "S", "string": "S"},
{"fullyEncoded": "%54", "minimallyEncoded": "T", "string": "T"},
{"fullyEncoded": "%55", "minimallyEncoded": "U", "string": "U"},
{"fullyEncoded": "%56", "minimallyEncoded": "V", "string": "V"},
{"fullyEncoded": "%57", "minimallyEncoded": "W", "string": "W"},
{"fullyEncoded": "%58", "minimallyEncoded": "X", "string": "X"},
{"fullyEncoded": "%59", "minimallyEncoded": "Y", "string": "Y"},
{"fullyEncoded": "%5A", "minimallyEncoded": "Z", "string": "Z"},
{"fullyEncoded": "%5B", "minimallyEncoded": "%5B", "string": "["},
{"fullyEncoded": "%5C", "minimallyEncoded": "%5C", "string": "\\"},
{"fullyEncoded": "%5D", "minimallyEncoded": "%5D", "string": "]"},
{"fullyEncoded": "%5E", "minimallyEncoded": "%5E", "string": "^"},
{"fullyEncoded": "%5F", "minimallyEncoded": "_", "string": "_"},
{"fullyEncoded": "%60", "minimallyEncoded": "%60", "string": "` + "`" + `"},
{"fullyEncoded": "%61", "minimallyEncoded": "a", "string": "a"},
{"fullyEncoded": "%62", "minimallyEncoded": "b", "string": "b"},
{"fullyEncoded": "%63", "minimallyEncoded": "c", "string": "c"},
{"fullyEncoded": "%64", "minimallyEncoded": "d", "string": "d"},
{"fullyEncoded": "%65", "minimallyEncoded": "e", "string": "e"},
{"fullyEncoded": "%66", "minimallyEncoded": "f", "string": "f"},
{"fullyEncoded": "%67", "minimallyEncoded": "g", "string": "g"},
{"fullyEncoded": "%68", "minimallyEncoded": "h", "string": "h"},
{"fullyEncoded": "%69", "minimallyEncoded": "i", "string": "i"},
{"fullyEncoded": "%6A", "minimallyEncoded": "j", "string": "j"},
{"fullyEncoded": "%6B", "minimallyEncoded": "k", "string": "k"},
{"fullyEncoded": "%6C", "minimallyEncoded": "l", "string": "l"},
{"fullyEncoded": "%6D", "minimallyEncoded": "m", "string": "m"},
{"fullyEncoded": "%6E", "minimallyEncoded": "n", "string": "n"},
{"fullyEncoded": "%6F", "minimallyEncoded": "o", "string": "o"},
{"fullyEncoded": "%70", "minimallyEncoded": "p", "string": "p"},
{"fullyEncoded": "%71", "minimallyEncoded": "q", "string": "q"},
{"fullyEncoded": "%72", "minimallyEncoded": "r", "string": "r"},
{"fullyEncoded": "%73", "minimallyEncoded": "s", "string": "s"},
{"fullyEncoded": "%74", "minimallyEncoded": "t", "string": "t"},
{"fullyEncoded": "%75", "minimallyEncoded": "u", "string": "u"},
{"fullyEncoded": "%76", "minimallyEncoded": "v", "string": "v"},
{"fullyEncoded": "%77", "minimallyEncoded": "w", "string": "w"},
{"fullyEncoded": "%78", "minimallyEncoded": "x", "string": "x"},
{"fullyEncoded": "%79", "minimallyEncoded": "y", "string": "y"},
{"fullyEncoded": "%7A", "minimallyEncoded": "z", "string": "z"},
{"fullyEncoded": "%7B", "minimallyEncoded": "%7B", "string": "{"},
{"fullyEncoded": "%7C", "minimallyEncoded": "%7C", "string": "|"},
{"fullyEncoded": "%7D", "minimallyEncoded": "%7D", "string": "}"},
{"fullyEncoded": "%7E", "minimallyEncoded": "~", "string": "~"},
{"fullyEncoded": "%7F", "minimallyEncoded": "%7F", "string": "\u007f"},
{"fullyEncoded": "%E8%87%AA%E7%94%B1", "minimallyEncoded": "%E8%87%AA%E7%94%B1", "string": "\u81ea\u7531"},
{"fullyEncoded": "%F0%90%90%80", "minimallyEncoded": "%F0%90%90%80", "string": "\ud801\udc00"}
]`

type testCase struct {
Full string `json:"fullyEncoded"`
Min string `json:"minimallyEncoded"`
Raw string `json:"string"`
}

func TestEscapes(t *testing.T) {
dec := json.NewDecoder(strings.NewReader(testCases))
var tcs []testCase
if err := dec.Decode(&tcs); err != nil {
t.Fatal(err)
}
for _, tc := range tcs {
en := escape(tc.Raw)
if !(en == tc.Full || en == tc.Min) {
t.Errorf("encode %q: got %q, want %q or %q", tc.Raw, en, tc.Min, tc.Full)
}

m, err := unescape(tc.Min)
if err != nil {
t.Errorf("decode %q: %v", tc.Min, err)
}
if m != tc.Raw {
t.Errorf("decode %q: got %q, want %q", tc.Min, m, tc.Raw)
}
f, err := unescape(tc.Full)
if err != nil {
t.Errorf("decode %q: %v", tc.Full, err)
}
if f != tc.Raw {
t.Errorf("decode %q: got %q, want %q", tc.Full, f, tc.Raw)
}
}
}
vendor/src/github.com/kurin/blazer/base/strings.go (new file, 81 lines, vendored)
@@ -0,0 +1,81 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package base

import (
"bytes"
"errors"
"fmt"
)

func noEscape(c byte) bool {
switch c {
case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@':
return true
}
return false
}

func escape(s string) string {
// cribbed from url.go, kinda
b := &bytes.Buffer{}
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == '/':
b.WriteByte(c)
case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9':
b.WriteByte(c)
case noEscape(c):
b.WriteByte(c)
default:
fmt.Fprintf(b, "%%%X", c)
}
}
return b.String()
}

func unescape(s string) (string, error) {
b := &bytes.Buffer{}
for i := 0; i < len(s); i++ {
c := s[i]
switch c {
case '/':
b.WriteString("/")
case '+':
b.WriteString(" ")
case '%':
if len(s)-i < 3 {
return "", errors.New("unescape: bad encoding")
}
b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
i += 2
default:
b.WriteByte(c)
}
}
return b.String(), nil
}

func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
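A small illustration (not part of this commit) of what the new helpers produce; since `escape` and `unescape` are unexported, it would have to live next to the `TestEscapes` table shown earlier in package base:

```go
package base

import "testing"

// TestEscapeSpaces is an illustrative sketch: spaces are percent-encoded on
// the way out, and both "%20" and "+" decode back to a space.
func TestEscapeSpaces(t *testing.T) {
	if got := escape("string with spaces"); got != "string%20with%20spaces" {
		t.Errorf("escape: got %q", got)
	}
	for _, in := range []string{"string%20with%20spaces", "string+with+spaces"} {
		got, err := unescape(in)
		if err != nil || got != "string with spaces" {
			t.Errorf("unescape(%q) = %q, %v", in, got, err)
		}
	}
}
```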
vendor/src/github.com/kurin/blazer/examples/simple/simple.go (new file, 134 lines, vendored)
@@ -0,0 +1,134 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This is a simple program that will copy named files into or out of B2.
//
// To copy a file into B2:
//
// B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple /path/to/file b2://bucket/path/to/dst
//
// To copy a file out:
//
// B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple b2://bucket/path/to/file /path/to/dst
package main

import (
"context"
"flag"
"fmt"
"io"
"net/url"
"os"
"strings"

"github.com/kurin/blazer/b2"
)

func main() {
flag.Parse()
b2id := os.Getenv("B2_ACCOUNT_ID")
b2key := os.Getenv("B2_ACCOUNT_KEY")

args := flag.Args()
if len(args) != 2 {
fmt.Printf("Usage:\n\nsimple [src] [dst]\n")
return
}
src, dst := args[0], args[1]

ctx := context.Background()
c, err := b2.NewClient(ctx, b2id, b2key)
if err != nil {
fmt.Println(err)
return
}

var r io.ReadCloser
var w io.WriteCloser

if strings.HasPrefix(src, "b2://") {
reader, err := b2Reader(ctx, c, src)
if err != nil {
fmt.Println(err)
return
}
r = reader
} else {
f, err := os.Open(src)
if err != nil {
fmt.Println(err)
return
}
r = f
}
// Readers do not need their errors checked on close. (Also it's a little
// silly to defer this in main(), but.)
defer r.Close()

if strings.HasPrefix(dst, "b2://") {
writer, err := b2Writer(ctx, c, dst)
if err != nil {
fmt.Println(err)
return
}
w = writer
} else {
f, err := os.Create(dst)
if err != nil {
fmt.Println(err)
return
}
w = f
}

// Copy and check error.
if _, err := io.Copy(w, r); err != nil {
fmt.Println(err)
return
}

// It is very important to check the error of the writer.
if err := w.Close(); err != nil {
fmt.Println(err)
}
}

func b2Reader(ctx context.Context, c *b2.Client, path string) (io.ReadCloser, error) {
o, err := b2Obj(ctx, c, path)
if err != nil {
return nil, err
}
return o.NewReader(ctx), nil
}

func b2Writer(ctx context.Context, c *b2.Client, path string) (io.WriteCloser, error) {
o, err := b2Obj(ctx, c, path)
if err != nil {
return nil, err
}
return o.NewWriter(ctx), nil
}

func b2Obj(ctx context.Context, c *b2.Client, path string) (*b2.Object, error) {
uri, err := url.Parse(path)
if err != nil {
return nil, err
}
bucket, err := c.Bucket(ctx, uri.Host)
if err != nil {
return nil, err
}
// B2 paths must not begin with /, so trim it here.
return bucket.Object(strings.TrimPrefix(uri.Path, "/")), nil
}
@@ -172,13 +172,7 @@ type ListFileNamesRequest struct {

type ListFileNamesResponse struct {
Continuation string `json:"nextFileName"`
Files []struct {
FileID string `json:"fileId"`
Name string `json:"fileName"`
Size int64 `json:"size"`
Action string `json:"action"`
Timestamp int64 `json:"uploadTimestamp"`
} `json:"files"`
Files []GetFileInfoResponse `json:"files"`
}

type ListFileVersionsRequest struct {

@@ -193,13 +187,7 @@ type ListFileVersionsRequest struct {
type ListFileVersionsResponse struct {
NextName string `json:"nextFileName"`
NextID string `json:"nextFileId"`
Files []struct {
FileID string `json:"fileId"`
Name string `json:"fileName"`
Size int64 `json:"size"`
Action string `json:"action"`
Timestamp int64 `json:"uploadTimestamp"`
} `json:"files"`
Files []GetFileInfoResponse `json:"files"`
}

type HideFileRequest struct {

@@ -218,6 +206,7 @@ type GetFileInfoRequest struct {
}

type GetFileInfoResponse struct {
FileID string `json:"fileId"`
Name string `json:"fileName"`
SHA1 string `json:"contentSha1"`
Size int64 `json:"contentLength"`
|
@@ -34,7 +34,7 @@ func dir() (string, error) {
	cmd.Stdout = &stdout
	if err := cmd.Run(); err != nil {
		// If "getent" is missing, ignore it
		if err == exec.ErrNotFound {
		if err != exec.ErrNotFound {
			return "", err
		}
	} else {

@@ -10,6 +10,10 @@ import (

// dir returns the homedir of current user for MS Windows OS.
func dir() (string, error) {
	// First prefer the HOME environmental variable
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	drive := os.Getenv("HOMEDRIVE")
	path := os.Getenv("HOMEPATH")
	home := drive + path
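Taken together, the two hunks above make dir() prefer the HOME environment variable and treat a missing getent binary as non-fatal. A minimal sketch of that resolution order, assuming the helper name resolveHome is hypothetical and not part of go-homedir:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// resolveHome mirrors the lookup order the patched dir() implementations use:
// $HOME first, then the Windows HOMEDRIVE+HOMEPATH pair, and finally a getent
// lookup whose absence (exec.ErrNotFound) is ignored rather than returned.
func resolveHome() (string, error) {
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	if drive, p := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); drive != "" && p != "" {
		return drive + p, nil
	}
	var stdout bytes.Buffer
	cmd := exec.Command("getent", "passwd", fmt.Sprint(os.Getuid()))
	cmd.Stdout = &stdout
	if err := cmd.Run(); err != nil {
		// A missing getent binary is not fatal; any other failure is.
		if !errors.Is(err, exec.ErrNotFound) {
			return "", err
		}
		return "", errors.New("home directory not found")
	}
	// getent passwd output: name:passwd:uid:gid:gecos:home:shell
	fields := strings.Split(strings.TrimSpace(stdout.String()), ":")
	if len(fields) > 5 && fields[5] != "" {
		return fields[5], nil
	}
	return "", errors.New("home directory not found")
}

func main() {
	home, err := resolveHome()
	fmt.Println(home, err)
}
```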
@@ -1,9 +1,9 @@
package homedir

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"testing"
)

@@ -66,7 +66,7 @@ func TestExpand(t *testing.T) {

	{
		"~/foo",
		fmt.Sprintf("%s/foo", u.HomeDir),
		filepath.Join(u.HomeDir, "foo"),
		false,
	},

@@ -103,12 +103,12 @@ func TestExpand(t *testing.T) {
	DisableCache = true
	defer func() { DisableCache = false }()
	defer patchEnv("HOME", "/custom/path/")()
	expected := "/custom/path/foo/bar"
	expected := filepath.Join("/", "custom", "path", "foo/bar")
	actual, err := Expand("~/foo/bar")

	if err != nil {
		t.Errorf("No error is expected, got: %v", err)
	} else if actual != "/custom/path/foo/bar" {
	} else if actual != expected {
		t.Errorf("Expected: %v; actual: %v", expected, actual)
	}
}
33  vendor/src/github.com/ncw/swift/swift_test.go  vendored

@@ -75,6 +75,7 @@ func makeConnection(t *testing.T) (*swift.Connection, func()) {
	ConnectionChannelTimeout := os.Getenv("SWIFT_CONNECTION_CHANNEL_TIMEOUT")
	DataChannelTimeout := os.Getenv("SWIFT_DATA_CHANNEL_TIMEOUT")

	internalServer := false
	if UserName == "" || ApiKey == "" || AuthUrl == "" {
		srv, err = swifttest.NewSwiftServer("localhost")
		if err != nil && t != nil {

@@ -84,6 +85,7 @@ func makeConnection(t *testing.T) (*swift.Connection, func()) {
		UserName = "swifttest"
		ApiKey = "swifttest"
		AuthUrl = srv.AuthURL
		internalServer = true
	}

	transport := &http.Transport{

@@ -105,6 +107,16 @@ func makeConnection(t *testing.T) (*swift.Connection, func()) {
		EndpointType: swift.EndpointType(EndpointType),
	}

	if !internalServer {
		if isV3Api() {
			c.Tenant = os.Getenv("SWIFT_TENANT")
			c.Domain = os.Getenv("SWIFT_API_DOMAIN")
		} else {
			c.Tenant = os.Getenv("SWIFT_TENANT")
			c.TenantId = os.Getenv("SWIFT_TENANT_ID")
		}
	}

	var timeout int64
	if ConnectionChannelTimeout != "" {
		timeout, err = strconv.ParseInt(ConnectionChannelTimeout, 10, 32)

@@ -304,14 +316,6 @@ func TestTransport(t *testing.T) {

	c.Transport = tr

	if isV3Api() {
		c.Tenant = os.Getenv("SWIFT_TENANT")
		c.Domain = os.Getenv("SWIFT_API_DOMAIN")
	} else {
		c.Tenant = os.Getenv("SWIFT_TENANT")
		c.TenantId = os.Getenv("SWIFT_TENANT_ID")
	}

	err := c.Authenticate()
	if err != nil {
		t.Fatal("Auth failed", err)

@@ -329,9 +333,6 @@ func TestV1V2Authenticate(t *testing.T) {
	c, rollback := makeConnection(t)
	defer rollback()

	c.Tenant = os.Getenv("SWIFT_TENANT")
	c.TenantId = os.Getenv("SWIFT_TENANT_ID")

	err := c.Authenticate()
	if err != nil {
		t.Fatal("Auth failed", err)

@@ -349,8 +350,10 @@ func TestV3AuthenticateWithDomainNameAndTenantId(t *testing.T) {
	c, rollback := makeConnection(t)
	defer rollback()

	c.TenantId = os.Getenv("SWIFT_TENANT_ID")
	c.Tenant = ""
	c.Domain = os.Getenv("SWIFT_API_DOMAIN")
	c.TenantId = os.Getenv("SWIFT_TENANT_ID")
	c.DomainId = ""

	err := c.Authenticate()
	if err != nil {

@@ -388,6 +391,8 @@ func TestV3AuthenticateWithDomainIdAndTenantId(t *testing.T) {
	c, rollback := makeConnection(t)
	defer rollback()

	c.Tenant = ""
	c.Domain = ""
	c.TenantId = os.Getenv("SWIFT_TENANT_ID")
	c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID")

@@ -410,6 +415,8 @@ func TestV3AuthenticateWithDomainNameAndTenantName(t *testing.T) {

	c.Tenant = os.Getenv("SWIFT_TENANT")
	c.Domain = os.Getenv("SWIFT_API_DOMAIN")
	c.TenantId = ""
	c.DomainId = ""

	err := c.Authenticate()
	if err != nil {

@@ -429,6 +436,8 @@ func TestV3AuthenticateWithDomainIdAndTenantName(t *testing.T) {
	defer rollback()

	c.Tenant = os.Getenv("SWIFT_TENANT")
	c.Domain = ""
	c.TenantId = ""
	c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID")

	err := c.Authenticate()
@@ -15,6 +15,7 @@ func noErrors(at, depth int) error {
	}
	return noErrors(at+1, depth)
}

func yesErrors(at, depth int) error {
	if at >= depth {
		return New("ye error")

@@ -22,8 +23,11 @@ func yesErrors(at, depth int) error {
	return yesErrors(at+1, depth)
}

// GlobalE is an exported global to store the result of benchmark results,
// preventing the compiler from optimising the benchmark functions away.
var GlobalE error

func BenchmarkErrors(b *testing.B) {
	var toperr error
	type run struct {
		stack int
		std   bool

@@ -53,7 +57,7 @@ func BenchmarkErrors(b *testing.B) {
				err = f(0, r.stack)
			}
			b.StopTimer()
			toperr = err
			GlobalE = err
		})
	}
}

@@ -196,7 +196,6 @@ func TestWithMessage(t *testing.T) {
			t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
		}
	}

}

// errors.New, etc values are not expected to be compared by value
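The switch from the local toperr to the exported GlobalE is the usual "sink variable" trick for keeping a benchmarked result alive so the compiler cannot prove the loop body is dead and eliminate it. A minimal sketch of the same pattern, independent of pkg/errors (Result and sum are illustrative names):

```go
package bench

import "testing"

// Result is an exported package-level sink. Assigning the benchmark's final
// output to it keeps the work observable, so the compiler cannot optimise
// the measured loop away.
var Result int

// sum is a trivial stand-in for the code under measurement.
func sum(n int) int {
	s := 0
	for i := 0; i < n; i++ {
		s += i
	}
	return s
}

func BenchmarkSum(b *testing.B) {
	var r int
	for i := 0; i < b.N; i++ {
		r = sum(1024)
	}
	// Store the last value in the exported sink once timing is done.
	Result = r
}
```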
8  vendor/src/github.com/pkg/errors/stack.go  vendored

@@ -79,6 +79,14 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//    %s	lists source files for each Frame in the stack
//    %v	lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+v	Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
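The new Format method means a recorded stack can be surfaced directly through the fmt verbs the comment describes. A small sketch of how a caller would typically print it (readConfig is a made-up function used only for illustration):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// readConfig fails and records the call stack at the point of failure.
func readConfig(path string) error {
	return errors.Errorf("open %s: no such file", path)
}

func main() {
	err := readConfig("/etc/app.conf")
	fmt.Printf("%v\n", err)  // message only
	fmt.Printf("%+v\n", err) // message plus file, function, and line per frame
}
```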
7  vendor/src/github.com/pkg/profile/README.md  vendored

@@ -45,3 +45,10 @@ func main() {
Several convenience package level values are provided for cpu, memory, and block (contention) profiling.

For more complex options, consult the [documentation](http://godoc.org/github.com/pkg/profile).

contributing
------------

We welcome pull requests, bug fixes and issue reports.

Before proposing a change, please discuss it first by raising an issue.
@@ -31,7 +31,7 @@ func ExampleMemProfileRate() {

func ExampleProfilePath() {
	// set the location that the profile will be written to
	defer profile.Start(profile.ProfilePath(os.Getenv("HOME")))
	defer profile.Start(profile.ProfilePath(os.Getenv("HOME"))).Stop()
}

func ExampleNoShutdownHook() {

@@ -41,13 +41,15 @@ func ExampleNoShutdownHook() {

func ExampleStart_withFlags() {
	// use the flags package to selectively enable profiling.
	mode := flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, block]")
	mode := flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")
	flag.Parse()
	switch *mode {
	case "cpu":
		defer profile.Start(profile.CPUProfile).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile).Stop()
	case "mutex":
		defer profile.Start(profile.MutexProfile).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile).Stop()
	default:
13  vendor/src/github.com/pkg/profile/mutex.go  vendored  Normal file

@@ -0,0 +1,13 @@
// +build go1.8

package profile

import "runtime"

func enableMutexProfile() {
	runtime.SetMutexProfileFraction(1)
}

func disableMutexProfile() {
	runtime.SetMutexProfileFraction(0)
}

9  vendor/src/github.com/pkg/profile/mutex17.go  vendored  Normal file

@@ -0,0 +1,9 @@
// +build !go1.8

package profile

// mock mutex support for Go 1.7 and earlier.

func enableMutexProfile() {}

func disableMutexProfile() {}
54  vendor/src/github.com/pkg/profile/profile.go  vendored

@@ -16,11 +16,13 @@ import (
const (
	cpuMode = iota
	memMode
	mutexMode
	blockMode
	traceMode
)

type profile struct {
// Profile represents an active profiling session.
type Profile struct {
	// quiet suppresses informational messages during profiling.
	quiet bool

@@ -50,14 +52,14 @@ type profile struct {
// Programs with more sophisticated signal handling should set
// this to true and ensure the Stop() function returned from Start()
// is called during shutdown.
func NoShutdownHook(p *profile) { p.noShutdownHook = true }
func NoShutdownHook(p *Profile) { p.noShutdownHook = true }

// Quiet suppresses informational messages during profiling.
func Quiet(p *profile) { p.quiet = true }
func Quiet(p *Profile) { p.quiet = true }

// CPUProfile enables cpu profiling.
// It disables any previous profiling settings.
func CPUProfile(p *profile) { p.mode = cpuMode }
func CPUProfile(p *Profile) { p.mode = cpuMode }

// DefaultMemProfileRate is the default memory profiling rate.
// See also http://golang.org/pkg/runtime/#pkg-variables

@@ -65,35 +67,44 @@ const DefaultMemProfileRate = 4096

// MemProfile enables memory profiling.
// It disables any previous profiling settings.
func MemProfile(p *profile) {
func MemProfile(p *Profile) {
	p.memProfileRate = DefaultMemProfileRate
	p.mode = memMode
}

// MemProfileRate enables memory profiling at the preferred rate.
// It disables any previous profiling settings.
func MemProfileRate(rate int) func(*profile) {
	return func(p *profile) {
func MemProfileRate(rate int) func(*Profile) {
	return func(p *Profile) {
		p.memProfileRate = rate
		p.mode = memMode
	}
}

// MutexProfile enables mutex profiling.
// It disables any previous profiling settings.
//
// Mutex profiling is a no-op before go1.8.
func MutexProfile(p *Profile) { p.mode = mutexMode }

// BlockProfile enables block (contention) profiling.
// It disables any previous profiling settings.
func BlockProfile(p *profile) { p.mode = blockMode }
func BlockProfile(p *Profile) { p.mode = blockMode }

// Trace profile controls if execution tracing will be enabled. It disables any previous profiling settings.
func TraceProfile(p *Profile) { p.mode = traceMode }

// ProfilePath controls the base path where various profiling
// files are written. If blank, the base path will be generated
// by ioutil.TempDir.
func ProfilePath(path string) func(*profile) {
	return func(p *profile) {
func ProfilePath(path string) func(*Profile) {
	return func(p *Profile) {
		p.path = path
	}
}

// Stop stops the profile and flushes any unwritten data.
func (p *profile) Stop() {
func (p *Profile) Stop() {
	if !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) {
		// someone has already called close
		return

@@ -108,14 +119,14 @@ var started uint32
// Start starts a new profiling session.
// The caller should call the Stop method on the value returned
// to cleanly stop profiling.
func Start(options ...func(*profile)) interface {
func Start(options ...func(*Profile)) interface {
	Stop()
} {
	if !atomic.CompareAndSwapUint32(&started, 0, 1) {
		log.Fatal("profile: Start() already called")
	}

	var prof profile
	var prof Profile
	for _, option := range options {
		option(&prof)
	}

@@ -168,6 +179,23 @@ func Start(options ...func(*profile)) interface {
			logf("profile: memory profiling disabled, %s", fn)
		}

	case mutexMode:
		fn := filepath.Join(path, "mutex.pprof")
		f, err := os.Create(fn)
		if err != nil {
			log.Fatalf("profile: could not create mutex profile %q: %v", fn, err)
		}
		enableMutexProfile()
		logf("profile: mutex profiling enabled, %s", fn)
		prof.closer = func() {
			if mp := pprof.Lookup("mutex"); mp != nil {
				mp.WriteTo(f, 0)
			}
			f.Close()
			disableMutexProfile()
			logf("profile: mutex profiling disabled, %s", fn)
		}

	case blockMode:
		fn := filepath.Join(path, "block.pprof")
		f, err := os.Create(fn)
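Exporting Profile while keeping the options as plain func(*Profile) values is the functional-options pattern: Start applies each option to a fresh Profile and only then switches on the resulting mode. A reduced sketch of that mechanism, with names trimmed down for illustration rather than the package's real implementation:

```go
package main

import "fmt"

// Profile holds the settings an option can toggle.
type Profile struct {
	mode  string
	quiet bool
}

// Option mutates a Profile; MutexProfile and Quiet below only mirror the
// shape of the real options.
type Option func(*Profile)

func MutexProfile(p *Profile) { p.mode = "mutex" }
func Quiet(p *Profile)        { p.quiet = true }

// Start builds a Profile from its options; the real Start then opens the
// matching profile file and returns a value with a Stop method.
func Start(options ...Option) *Profile {
	p := &Profile{mode: "cpu"} // default mode
	for _, opt := range options {
		opt(p)
	}
	return p
}

func main() {
	p := Start(MutexProfile, Quiet)
	fmt.Println(p.mode, p.quiet) // mutex true
}
```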
|
@ -14,11 +14,18 @@ import (
|
|||
|
||||
type checkFn func(t *testing.T, stdout, stderr []byte, err error)
|
||||
|
||||
var profileTests = []struct {
|
||||
func TestProfile(t *testing.T) {
|
||||
f, err := ioutil.TempFile("", "profile_test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
var profileTests = []struct {
|
||||
name string
|
||||
code string
|
||||
checks []checkFn
|
||||
}{{
|
||||
}{{
|
||||
name: "default profile (cpu)",
|
||||
code: `
|
||||
package main
|
||||
|
@ -34,7 +41,7 @@ func main() {
|
|||
Stderr("profile: cpu profiling enabled"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "memory profile",
|
||||
code: `
|
||||
package main
|
||||
|
@ -50,7 +57,7 @@ func main() {
|
|||
Stderr("profile: memory profiling enabled"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "memory profile (rate 2048)",
|
||||
code: `
|
||||
package main
|
||||
|
@ -66,7 +73,7 @@ func main() {
|
|||
Stderr("profile: memory profiling enabled (rate 2048)"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "double start",
|
||||
code: `
|
||||
package main
|
||||
|
@ -83,7 +90,7 @@ func main() {
|
|||
Stderr("cpu profiling enabled", "profile: Start() already called"),
|
||||
Err,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "block profile",
|
||||
code: `
|
||||
package main
|
||||
|
@ -99,7 +106,23 @@ func main() {
|
|||
Stderr("profile: block profiling enabled"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "mutex profile",
|
||||
code: `
|
||||
package main
|
||||
|
||||
import "github.com/pkg/profile"
|
||||
|
||||
func main() {
|
||||
defer profile.Start(profile.MutexProfile).Stop()
|
||||
}
|
||||
`,
|
||||
checks: []checkFn{
|
||||
NoStdout,
|
||||
Stderr("profile: mutex profiling enabled"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
name: "profile path",
|
||||
code: `
|
||||
package main
|
||||
|
@ -115,7 +138,7 @@ func main() {
|
|||
Stderr("profile: cpu profiling enabled, cpu.pprof"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "profile path error",
|
||||
code: `
|
||||
package main
|
||||
|
@ -123,7 +146,7 @@ package main
|
|||
import "github.com/pkg/profile"
|
||||
|
||||
func main() {
|
||||
defer profile.Start(profile.ProfilePath("README.md")).Stop()
|
||||
defer profile.Start(profile.ProfilePath("` + f.Name() + `")).Stop()
|
||||
}
|
||||
`,
|
||||
checks: []checkFn{
|
||||
|
@ -131,7 +154,7 @@ func main() {
|
|||
Stderr("could not create initial output"),
|
||||
Err,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "multiple profile sessions",
|
||||
code: `
|
||||
package main
|
||||
|
@ -143,6 +166,7 @@ func main() {
|
|||
profile.Start(profile.MemProfile).Stop()
|
||||
profile.Start(profile.BlockProfile).Stop()
|
||||
profile.Start(profile.CPUProfile).Stop()
|
||||
profile.Start(profile.MutexProfile).Stop()
|
||||
}
|
||||
`,
|
||||
checks: []checkFn{
|
||||
|
@ -152,10 +176,14 @@ func main() {
|
|||
"profile: memory profiling enabled",
|
||||
"profile: memory profiling disabled",
|
||||
"profile: block profiling enabled",
|
||||
"profile: block profiling disabled"),
|
||||
"profile: block profiling disabled",
|
||||
"profile: cpu profiling enabled",
|
||||
"profile: cpu profiling disabled",
|
||||
"profile: mutex profiling enabled",
|
||||
"profile: mutex profiling disabled"),
|
||||
NoErr,
|
||||
},
|
||||
}, {
|
||||
}, {
|
||||
name: "profile quiet",
|
||||
code: `
|
||||
package main
|
||||
|
@ -167,9 +195,7 @@ func main() {
|
|||
}
|
||||
`,
|
||||
checks: []checkFn{NoStdout, NoStderr, NoErr},
|
||||
}}
|
||||
|
||||
func TestProfile(t *testing.T) {
|
||||
}}
|
||||
for _, tt := range profileTests {
|
||||
t.Log(tt.name)
|
||||
stdout, stderr, err := runTest(t, tt.code)
|
||||
|
|
3  vendor/src/github.com/pkg/profile/trace.go  vendored

@@ -4,8 +4,5 @@ package profile

import "runtime/trace"

// Trace profile controls if execution tracing will be enabled. It disables any previous profiling settings.
func TraceProfile(p *profile) { p.mode = traceMode }

var startTrace = trace.Start
var stopTrace = trace.Stop

@@ -1,5 +1,3 @@
// +build go1.7

package profile_test

import "github.com/pkg/profile"
1  vendor/src/github.com/pkg/sftp/CONTRIBUTORS  vendored

@@ -1,2 +1,3 @@
Dave Cheney <dave@cheney.net>
Saulius Gurklys <s4uliu5@gmail.com>
John Eikenberry <jae@zhar.net>
29  vendor/src/github.com/pkg/sftp/README.md  vendored

@@ -1,27 +1,44 @@
sftp
----

The `sftp` package provides support for file system operations on remote ssh servers using the SFTP subsystem.
The `sftp` package provides support for file system operations on remote ssh
servers using the SFTP subsystem. It also implements an SFTP server for serving
files from the filesystem.

[![UNIX Build Status](https://travis-ci.org/pkg/sftp.svg?branch=master)](https://travis-ci.org/pkg/sftp) [![GoDoc](http://godoc.org/github.com/pkg/sftp?status.svg)](http://godoc.org/github.com/pkg/sftp)

usage and examples
------------------

See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for examples and usage.
See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for
examples and usage.

The basic operation of the package mirrors the facilities of the [os](http://golang.org/pkg/os) package.
The basic operation of the package mirrors the facilities of the
[os](http://golang.org/pkg/os) package.

The Walker interface for directory traversal is heavily inspired by Keith Rarick's [fs](http://godoc.org/github.com/kr/fs) package.
The Walker interface for directory traversal is heavily inspired by Keith
Rarick's [fs](http://godoc.org/github.com/kr/fs) package.

roadmap
-------

* There is way too much duplication in the Client methods. If there was an unmarshal(interface{}) method this would reduce a heap of the duplication.
* There is way too much duplication in the Client methods. If there was an
  unmarshal(interface{}) method this would reduce a heap of the duplication.

contributing
------------

We welcome pull requests, bug fixes and issue reports.

Before proposing a large change, first please discuss your change by raising an issue.
Before proposing a large change, first please discuss your change by raising an
issue.

For API/code bugs, please include a small, self contained code example to
reproduce the issue. For pull requests, remember test coverage.

We try to handle issues and pull requests with a 0 open philosophy. That means
we will try to address the submission as soon as possible and will work toward
a resolution. If progress can no longer be made (eg. unreproducible bug) or
stops (eg. unresponsive submitter), we will close the bug.

Thanks.
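Since the README defers to godoc for usage, a minimal sketch of the os-like surface it describes may help; it assumes an already-authenticated *ssh.Client named conn and a remote path of /tmp/hello.txt, both of which are placeholders:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// copyRemoteFile downloads one remote file over an existing SSH connection.
func copyRemoteFile(conn *ssh.Client) error {
	// Open an SFTP session on top of the existing SSH connection.
	client, err := sftp.NewClient(conn)
	if err != nil {
		return err
	}
	defer client.Close()

	// The client mirrors the os package: Open, Create, Stat, Remove, ...
	src, err := client.Open("/tmp/hello.txt")
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create("hello.txt")
	if err != nil {
		return err
	}
	defer dst.Close()

	_, err = io.Copy(dst, src)
	return err
}

func main() {
	var conn *ssh.Client // placeholder: dial and authenticate elsewhere
	if err := copyRemoteFile(conn); err != nil {
		log.Fatal(err)
	}
}
```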
91  vendor/src/github.com/pkg/sftp/client.go  vendored
|
@ -203,7 +203,7 @@ func (c *Client) opendir(path string) (string, error) {
|
|||
handle, _ := unmarshalString(data)
|
||||
return handle, nil
|
||||
case ssh_FXP_STATUS:
|
||||
return "", unmarshalStatus(id, data)
|
||||
return "", normaliseError(unmarshalStatus(id, data))
|
||||
default:
|
||||
return "", unimplementedPacketErr(typ)
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ func (c *Client) ReadLink(p string) (string, error) {
|
|||
filename, _ := unmarshalString(data) // ignore dummy attributes
|
||||
return filename, nil
|
||||
case ssh_FXP_STATUS:
|
||||
return "", unmarshalStatus(id, data)
|
||||
return "", normaliseError(unmarshalStatus(id, data))
|
||||
default:
|
||||
return "", unimplementedPacketErr(typ)
|
||||
}
|
||||
|
@ -439,7 +439,7 @@ func (c *Client) fstat(handle string) (*FileStat, error) {
|
|||
attr, _ := unmarshalAttrs(data)
|
||||
return attr, nil
|
||||
case ssh_FXP_STATUS:
|
||||
return nil, unmarshalStatus(id, data)
|
||||
return nil, normaliseError(unmarshalStatus(id, data))
|
||||
default:
|
||||
return nil, unimplementedPacketErr(typ)
|
||||
}
|
||||
|
@ -495,7 +495,7 @@ func (c *Client) Remove(path string) error {
|
|||
// some servers, *cough* osx *cough*, return EPERM, not ENODIR.
|
||||
// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
|
||||
case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY:
|
||||
return c.removeDirectory(path)
|
||||
return c.RemoveDirectory(path)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
@ -518,7 +518,8 @@ func (c *Client) removeFile(path string) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Client) removeDirectory(path string) error {
|
||||
// RemoveDirectory removes a directory path.
|
||||
func (c *Client) RemoveDirectory(path string) error {
|
||||
id := c.nextID()
|
||||
typ, data, err := c.sendPacket(sshFxpRmdirPacket{
|
||||
ID: id,
|
||||
|
@ -640,9 +641,10 @@ func (f *File) Name() string {
|
|||
|
||||
const maxConcurrentRequests = 64
|
||||
|
||||
// Read reads up to len(b) bytes from the File. It returns the number of
|
||||
// bytes read and an error, if any. EOF is signaled by a zero count with
|
||||
// err set to io.EOF.
|
||||
// Read reads up to len(b) bytes from the File. It returns the number of bytes
|
||||
// read and an error, if any. Read follows io.Reader semantics, so when Read
|
||||
// encounters an error or EOF condition after successfully reading n > 0 bytes,
|
||||
// it returns the number of bytes read.
|
||||
func (f *File) Read(b []byte) (int, error) {
|
||||
// Split the read into multiple maxPacket sized concurrent reads
|
||||
// bounded by maxConcurrentRequests. This allows reads with a suitably
|
||||
|
@ -651,7 +653,9 @@ func (f *File) Read(b []byte) (int, error) {
|
|||
inFlight := 0
|
||||
desiredInFlight := 1
|
||||
offset := f.offset
|
||||
ch := make(chan result, 1)
|
||||
// maxConcurrentRequests buffer to deal with broadcastErr() floods
|
||||
// also must have a buffer of max value of (desiredInFlight - inFlight)
|
||||
ch := make(chan result, maxConcurrentRequests)
|
||||
type inflightRead struct {
|
||||
b []byte
|
||||
offset uint64
|
||||
|
@ -688,18 +692,17 @@ func (f *File) Read(b []byte) (int, error) {
|
|||
if inFlight == 0 {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case res := <-ch:
|
||||
res := <-ch
|
||||
inFlight--
|
||||
if res.err != nil {
|
||||
firstErr = offsetErr{offset: 0, err: res.err}
|
||||
break
|
||||
continue
|
||||
}
|
||||
reqID, data := unmarshalUint32(res.data)
|
||||
req, ok := reqs[reqID]
|
||||
if !ok {
|
||||
firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
|
||||
break
|
||||
continue
|
||||
}
|
||||
delete(reqs, reqID)
|
||||
switch res.typ {
|
||||
|
@ -709,7 +712,6 @@ func (f *File) Read(b []byte) (int, error) {
|
|||
offset: req.offset,
|
||||
err: normaliseError(unmarshalStatus(reqID, res.data)),
|
||||
}
|
||||
break
|
||||
}
|
||||
case ssh_FXP_DATA:
|
||||
l, data := unmarshalUint32(data)
|
||||
|
@ -723,8 +725,6 @@ func (f *File) Read(b []byte) (int, error) {
|
|||
}
|
||||
default:
|
||||
firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the error is anything other than EOF, then there
|
||||
|
@ -750,7 +750,8 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
offset := f.offset
|
||||
writeOffset := offset
|
||||
fileSize := uint64(fi.Size())
|
||||
ch := make(chan result, 1)
|
||||
// see comment on same line in Read() above
|
||||
ch := make(chan result, maxConcurrentRequests)
|
||||
type inflightRead struct {
|
||||
b []byte
|
||||
offset uint64
|
||||
|
@ -777,7 +778,8 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
|
||||
var copied int64
|
||||
for firstErr.err == nil || inFlight > 0 {
|
||||
for inFlight < desiredInFlight && firstErr.err == nil {
|
||||
if firstErr.err == nil {
|
||||
for inFlight+len(pendingWrites) < desiredInFlight {
|
||||
b := make([]byte, f.c.maxPacket)
|
||||
sendReq(b, offset)
|
||||
offset += uint64(f.c.maxPacket)
|
||||
|
@ -785,29 +787,31 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
desiredInFlight = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inFlight == 0 {
|
||||
if firstErr.err == nil && len(pendingWrites) > 0 {
|
||||
return copied, errors.New("internal inconsistency")
|
||||
}
|
||||
break
|
||||
}
|
||||
select {
|
||||
case res := <-ch:
|
||||
res := <-ch
|
||||
inFlight--
|
||||
if res.err != nil {
|
||||
firstErr = offsetErr{offset: 0, err: res.err}
|
||||
break
|
||||
continue
|
||||
}
|
||||
reqID, data := unmarshalUint32(res.data)
|
||||
req, ok := reqs[reqID]
|
||||
if !ok {
|
||||
firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
|
||||
break
|
||||
continue
|
||||
}
|
||||
delete(reqs, reqID)
|
||||
switch res.typ {
|
||||
case ssh_FXP_STATUS:
|
||||
if firstErr.err == nil || req.offset < firstErr.offset {
|
||||
firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))}
|
||||
break
|
||||
}
|
||||
case ssh_FXP_DATA:
|
||||
l, data := unmarshalUint32(data)
|
||||
|
@ -815,6 +819,8 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
nbytes, err := w.Write(data)
|
||||
copied += int64(nbytes)
|
||||
if err != nil {
|
||||
// We will never receive another DATA with offset==writeOffset, so
|
||||
// the loop will drain inFlight and then exit.
|
||||
firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err}
|
||||
break
|
||||
}
|
||||
|
@ -829,8 +835,16 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
desiredInFlight++
|
||||
}
|
||||
writeOffset += uint64(nbytes)
|
||||
for pendingData, ok := pendingWrites[writeOffset]; ok; pendingData, ok = pendingWrites[writeOffset] {
|
||||
for {
|
||||
pendingData, ok := pendingWrites[writeOffset]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
// Give go a chance to free the memory.
|
||||
delete(pendingWrites, writeOffset)
|
||||
nbytes, err := w.Write(pendingData)
|
||||
// Do not move writeOffset on error so subsequent iterations won't trigger
|
||||
// any writes.
|
||||
if err != nil {
|
||||
firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err}
|
||||
break
|
||||
|
@ -840,20 +854,16 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
|
|||
break
|
||||
}
|
||||
writeOffset += uint64(nbytes)
|
||||
inFlight--
|
||||
}
|
||||
} else {
|
||||
// Don't write the data yet because
|
||||
// this response came in out of order
|
||||
// and we need to wait for responses
|
||||
// for earlier segments of the file.
|
||||
inFlight++ // Pending writes should still be considered inFlight.
|
||||
pendingWrites[req.offset] = data
|
||||
}
|
||||
default:
|
||||
firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if firstErr.err != io.EOF {
|
||||
|
@ -883,7 +893,8 @@ func (f *File) Write(b []byte) (int, error) {
|
|||
inFlight := 0
|
||||
desiredInFlight := 1
|
||||
offset := f.offset
|
||||
ch := make(chan result, 1)
|
||||
// see comment on same line in Read() above
|
||||
ch := make(chan result, maxConcurrentRequests)
|
||||
var firstErr error
|
||||
written := len(b)
|
||||
for len(b) > 0 || inFlight > 0 {
|
||||
|
@ -905,12 +916,11 @@ func (f *File) Write(b []byte) (int, error) {
|
|||
if inFlight == 0 {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case res := <-ch:
|
||||
res := <-ch
|
||||
inFlight--
|
||||
if res.err != nil {
|
||||
firstErr = res.err
|
||||
break
|
||||
continue
|
||||
}
|
||||
switch res.typ {
|
||||
case ssh_FXP_STATUS:
|
||||
|
@ -925,8 +935,6 @@ func (f *File) Write(b []byte) (int, error) {
|
|||
}
|
||||
default:
|
||||
firstErr = unimplementedPacketErr(res.typ)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// If error is non-nil, then there may be gaps in the data written to
|
||||
|
@ -946,7 +954,8 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) {
|
|||
inFlight := 0
|
||||
desiredInFlight := 1
|
||||
offset := f.offset
|
||||
ch := make(chan result, 1)
|
||||
// see comment on same line in Read() above
|
||||
ch := make(chan result, maxConcurrentRequests)
|
||||
var firstErr error
|
||||
read := int64(0)
|
||||
b := make([]byte, f.c.maxPacket)
|
||||
|
@ -971,12 +980,11 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) {
|
|||
if inFlight == 0 {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case res := <-ch:
|
||||
res := <-ch
|
||||
inFlight--
|
||||
if res.err != nil {
|
||||
firstErr = res.err
|
||||
break
|
||||
continue
|
||||
}
|
||||
switch res.typ {
|
||||
case ssh_FXP_STATUS:
|
||||
|
@ -991,8 +999,6 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) {
|
|||
}
|
||||
default:
|
||||
firstErr = unimplementedPacketErr(res.typ)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if firstErr == io.EOF {
|
||||
|
@ -1080,10 +1086,7 @@ func unmarshalStatus(id uint32, data []byte) error {
|
|||
return &unexpectedIDErr{id, sid}
|
||||
}
|
||||
code, data := unmarshalUint32(data)
|
||||
msg, data, err := unmarshalStringSafe(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg, data, _ := unmarshalStringSafe(data)
|
||||
lang, _, _ := unmarshalStringSafe(data)
|
||||
return &StatusError{
|
||||
Code: code,
|
||||
|
|
|
@ -29,14 +29,14 @@ func TestClientStatVFS(t *testing.T) {
|
|||
|
||||
// check some stats
|
||||
if vfs.Frsize != uint64(s.Frsize) {
|
||||
t.Fatal("fr_size does not match, expected: %v, got: %v", s.Frsize, vfs.Frsize)
|
||||
t.Fatalf("fr_size does not match, expected: %v, got: %v", s.Frsize, vfs.Frsize)
|
||||
}
|
||||
|
||||
if vfs.Bsize != uint64(s.Bsize) {
|
||||
t.Fatal("f_bsize does not match, expected: %v, got: %v", s.Bsize, vfs.Bsize)
|
||||
t.Fatalf("f_bsize does not match, expected: %v, got: %v", s.Bsize, vfs.Bsize)
|
||||
}
|
||||
|
||||
if vfs.Namemax != uint64(s.Namelen) {
|
||||
t.Fatal("f_namemax does not match, expected: %v, got: %v", s.Namelen, vfs.Namemax)
|
||||
t.Fatalf("f_namemax does not match, expected: %v, got: %v", s.Namelen, vfs.Namemax)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,10 @@ package sftp
|
|||
// enable with -integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"errors"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
@ -22,6 +25,8 @@ import (
|
|||
"testing/quick"
|
||||
"time"
|
||||
|
||||
"sort"
|
||||
|
||||
"github.com/kr/fs"
|
||||
)
|
||||
|
||||
|
@ -87,7 +92,7 @@ func (w delayedWriter) Close() error {
|
|||
|
||||
// netPipe provides a pair of io.ReadWriteClosers connected to each other.
|
||||
// The functions is identical to os.Pipe with the exception that netPipe
|
||||
// provides the Read/Close guarentees that os.File derrived pipes do not.
|
||||
// provides the Read/Close guarantees that os.File derrived pipes do not.
|
||||
func netPipe(t testing.TB) (io.ReadWriteCloser, io.ReadWriteCloser) {
|
||||
type result struct {
|
||||
net.Conn
|
||||
|
@ -1159,6 +1164,197 @@ func TestClientWrite(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// ReadFrom is basically Write with io.Reader as the arg
|
||||
func TestClientReadFrom(t *testing.T) {
|
||||
sftp, cmd := testClient(t, READWRITE, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
d, err := ioutil.TempDir("", "sftptest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
f := path.Join(d, "writeTest")
|
||||
w, err := sftp.Create(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
for _, tt := range clientWriteTests {
|
||||
got, err := w.ReadFrom(bytes.NewReader(make([]byte, tt.n)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got != int64(tt.n) {
|
||||
t.Errorf("Write(%v): wrote: want: %v, got %v", tt.n, tt.n, got)
|
||||
}
|
||||
fi, err := os.Stat(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if total := fi.Size(); total != tt.total {
|
||||
t.Errorf("Write(%v): size: want: %v, got %v", tt.n, tt.total, total)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Issue #145 in github
|
||||
// Deadlock in ReadFrom when network drops after 1 good packet.
|
||||
// Deadlock would occur anytime desiredInFlight-inFlight==2 and 2 errors
|
||||
// occured in a row. The channel to report the errors only had a buffer
|
||||
// of 1 and 2 would be sent.
|
||||
var fakeNetErr = errors.New("Fake network issue")
|
||||
|
||||
func TestClientReadFromDeadlock(t *testing.T) {
|
||||
clientWriteDeadlock(t, 1, func(f *File) {
|
||||
b := make([]byte, 32768*4)
|
||||
content := bytes.NewReader(b)
|
||||
n, err := f.ReadFrom(content)
|
||||
if n != 0 {
|
||||
t.Fatal("Write should return 0", n)
|
||||
}
|
||||
if err != fakeNetErr {
|
||||
t.Fatal("Didn't recieve correct error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Write has exact same problem
|
||||
func TestClientWriteDeadlock(t *testing.T) {
|
||||
clientWriteDeadlock(t, 1, func(f *File) {
|
||||
b := make([]byte, 32768*4)
|
||||
n, err := f.Write(b)
|
||||
if n != 0 {
|
||||
t.Fatal("Write should return 0", n)
|
||||
}
|
||||
if err != fakeNetErr {
|
||||
t.Fatal("Didn't recieve correct error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// shared body for both previous tests
|
||||
func clientWriteDeadlock(t *testing.T, N int, badfunc func(*File)) {
|
||||
if !*testServerImpl {
|
||||
t.Skipf("skipping without -testserver")
|
||||
}
|
||||
sftp, cmd := testClient(t, READWRITE, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
d, err := ioutil.TempDir("", "sftptest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
f := path.Join(d, "writeTest")
|
||||
w, err := sftp.Create(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
// Override sendPacket with failing version
|
||||
// Replicates network error/drop part way through (after 1 good packet)
|
||||
count := 0
|
||||
sendPacketTest := func(w io.Writer, m encoding.BinaryMarshaler) error {
|
||||
count++
|
||||
if count > N {
|
||||
return fakeNetErr
|
||||
}
|
||||
return sendPacket(w, m)
|
||||
}
|
||||
sftp.clientConn.conn.sendPacketTest = sendPacketTest
|
||||
defer func() {
|
||||
sftp.clientConn.conn.sendPacketTest = nil
|
||||
}()
|
||||
|
||||
// this locked (before the fix)
|
||||
badfunc(w)
|
||||
}
|
||||
|
||||
// Read/WriteTo has this issue as well
|
||||
func TestClientReadDeadlock(t *testing.T) {
|
||||
clientReadDeadlock(t, 1, func(f *File) {
|
||||
b := make([]byte, 32768*4)
|
||||
n, err := f.Read(b)
|
||||
if n != 0 {
|
||||
t.Fatal("Write should return 0", n)
|
||||
}
|
||||
if err != fakeNetErr {
|
||||
t.Fatal("Didn't recieve correct error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestClientWriteToDeadlock(t *testing.T) {
|
||||
clientReadDeadlock(t, 2, func(f *File) {
|
||||
b := make([]byte, 32768*4)
|
||||
buf := bytes.NewBuffer(b)
|
||||
n, err := f.WriteTo(buf)
|
||||
if n != 32768 {
|
||||
t.Fatal("Write should return 0", n)
|
||||
}
|
||||
if err != fakeNetErr {
|
||||
t.Fatal("Didn't recieve correct error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func clientReadDeadlock(t *testing.T, N int, badfunc func(*File)) {
|
||||
if !*testServerImpl {
|
||||
t.Skipf("skipping without -testserver")
|
||||
}
|
||||
sftp, cmd := testClient(t, READWRITE, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
d, err := ioutil.TempDir("", "sftptest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
f := path.Join(d, "writeTest")
|
||||
w, err := sftp.Create(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// write the data for the read tests
|
||||
b := make([]byte, 32768*4)
|
||||
w.Write(b)
|
||||
defer w.Close()
|
||||
|
||||
// open new copy of file for read tests
|
||||
r, err := sftp.Open(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
// Override sendPacket with failing version
|
||||
// Replicates network error/drop part way through (after 1 good packet)
|
||||
count := 0
|
||||
sendPacketTest := func(w io.Writer, m encoding.BinaryMarshaler) error {
|
||||
count++
|
||||
if count > N {
|
||||
return fakeNetErr
|
||||
}
|
||||
return sendPacket(w, m)
|
||||
}
|
||||
sftp.clientConn.conn.sendPacketTest = sendPacketTest
|
||||
defer func() {
|
||||
sftp.clientConn.conn.sendPacketTest = nil
|
||||
}()
|
||||
|
||||
// this locked (before the fix)
|
||||
badfunc(r)
|
||||
}
|
||||
|
||||
// taken from github.com/kr/fs/walk_test.go
|
||||
|
||||
type Node struct {
|
||||
|
@ -1330,6 +1526,169 @@ func TestClientWalk(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type MatchTest struct {
|
||||
pattern, s string
|
||||
match bool
|
||||
err error
|
||||
}
|
||||
|
||||
var matchTests = []MatchTest{
|
||||
{"abc", "abc", true, nil},
|
||||
{"*", "abc", true, nil},
|
||||
{"*c", "abc", true, nil},
|
||||
{"a*", "a", true, nil},
|
||||
{"a*", "abc", true, nil},
|
||||
{"a*", "ab/c", false, nil},
|
||||
{"a*/b", "abc/b", true, nil},
|
||||
{"a*/b", "a/c/b", false, nil},
|
||||
{"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
|
||||
{"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
|
||||
{"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
|
||||
{"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
|
||||
{"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
|
||||
{"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
|
||||
{"ab[c]", "abc", true, nil},
|
||||
{"ab[b-d]", "abc", true, nil},
|
||||
{"ab[e-g]", "abc", false, nil},
|
||||
{"ab[^c]", "abc", false, nil},
|
||||
{"ab[^b-d]", "abc", false, nil},
|
||||
{"ab[^e-g]", "abc", true, nil},
|
||||
{"a\\*b", "a*b", true, nil},
|
||||
{"a\\*b", "ab", false, nil},
|
||||
{"a?b", "a☺b", true, nil},
|
||||
{"a[^a]b", "a☺b", true, nil},
|
||||
{"a???b", "a☺b", false, nil},
|
||||
{"a[^a][^a][^a]b", "a☺b", false, nil},
|
||||
{"[a-ζ]*", "α", true, nil},
|
||||
{"*[a-ζ]", "A", false, nil},
|
||||
{"a?b", "a/b", false, nil},
|
||||
{"a*b", "a/b", false, nil},
|
||||
{"[\\]a]", "]", true, nil},
|
||||
{"[\\-]", "-", true, nil},
|
||||
{"[x\\-]", "x", true, nil},
|
||||
{"[x\\-]", "-", true, nil},
|
||||
{"[x\\-]", "z", false, nil},
|
||||
{"[\\-x]", "x", true, nil},
|
||||
{"[\\-x]", "-", true, nil},
|
||||
{"[\\-x]", "a", false, nil},
|
||||
{"[]a]", "]", false, ErrBadPattern},
|
||||
{"[-]", "-", false, ErrBadPattern},
|
||||
{"[x-]", "x", false, ErrBadPattern},
|
||||
{"[x-]", "-", false, ErrBadPattern},
|
||||
{"[x-]", "z", false, ErrBadPattern},
|
||||
{"[-x]", "x", false, ErrBadPattern},
|
||||
{"[-x]", "-", false, ErrBadPattern},
|
||||
{"[-x]", "a", false, ErrBadPattern},
|
||||
{"\\", "a", false, ErrBadPattern},
|
||||
{"[a-b-c]", "a", false, ErrBadPattern},
|
||||
{"[", "a", false, ErrBadPattern},
|
||||
{"[^", "a", false, ErrBadPattern},
|
||||
{"[^bc", "a", false, ErrBadPattern},
|
||||
{"a[", "a", false, nil},
|
||||
{"a[", "ab", false, ErrBadPattern},
|
||||
{"*x", "xxx", true, nil},
|
||||
}
|
||||
|
||||
func errp(e error) string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// contains returns true if vector contains the string s.
|
||||
func contains(vector []string, s string) bool {
|
||||
for _, elem := range vector {
|
||||
if elem == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var globTests = []struct {
|
||||
pattern, result string
|
||||
}{
|
||||
{"match.go", "./match.go"},
|
||||
{"mat?h.go", "./match.go"},
|
||||
{"ma*ch.go", "./match.go"},
|
||||
{"../*/match.go", "../sftp/match.go"},
|
||||
}
|
||||
|
||||
type globTest struct {
|
||||
pattern string
|
||||
matches []string
|
||||
}
|
||||
|
||||
func (test *globTest) buildWant(root string) []string {
|
||||
var want []string
|
||||
for _, m := range test.matches {
|
||||
want = append(want, root+filepath.FromSlash(m))
|
||||
}
|
||||
sort.Strings(want)
|
||||
return want
|
||||
}
|
||||
|
||||
func TestMatch(t *testing.T) {
|
||||
for _, tt := range matchTests {
|
||||
pattern := tt.pattern
|
||||
s := tt.s
|
||||
ok, err := Match(pattern, s)
|
||||
if ok != tt.match || err != tt.err {
|
||||
t.Errorf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGlob(t *testing.T) {
|
||||
sftp, cmd := testClient(t, READONLY, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
for _, tt := range globTests {
|
||||
pattern := tt.pattern
|
||||
result := tt.result
|
||||
matches, err := sftp.Glob(pattern)
|
||||
if err != nil {
|
||||
t.Errorf("Glob error for %q: %s", pattern, err)
|
||||
continue
|
||||
}
|
||||
if !contains(matches, result) {
|
||||
t.Errorf("Glob(%#q) = %#v want %v", pattern, matches, result)
|
||||
}
|
||||
}
|
||||
for _, pattern := range []string{"no_match", "../*/no_match"} {
|
||||
matches, err := sftp.Glob(pattern)
|
||||
if err != nil {
|
||||
t.Errorf("Glob error for %q: %s", pattern, err)
|
||||
continue
|
||||
}
|
||||
if len(matches) != 0 {
|
||||
t.Errorf("Glob(%#q) = %#v want []", pattern, matches)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGlobError(t *testing.T) {
|
||||
sftp, cmd := testClient(t, READONLY, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
_, err := sftp.Glob("[7]")
|
||||
if err != nil {
|
||||
t.Error("expected error for bad pattern; got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGlobUNC(t *testing.T) {
|
||||
sftp, cmd := testClient(t, READONLY, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
// Just make sure this runs without crashing for now.
|
||||
// See issue 15879.
|
||||
sftp.Glob(`\\?\C:\*`)
|
||||
}
|
||||
|
||||
// sftp/issue/42, abrupt server hangup would result in client hangs.
|
||||
func TestServerRoughDisconnect(t *testing.T) {
|
||||
if *testServerImpl {
|
||||
|
@ -1352,6 +1711,35 @@ func TestServerRoughDisconnect(t *testing.T) {
|
|||
io.Copy(ioutil.Discard, f)
|
||||
}
|
||||
|
||||
// sftp/issue/181, abrupt server hangup would result in client hangs.
|
||||
// due to broadcastErr filling up the request channel
|
||||
// this reproduces it about 50% of the time
|
||||
func TestServerRoughDisconnect2(t *testing.T) {
|
||||
if *testServerImpl {
|
||||
t.Skipf("skipping with -testserver")
|
||||
}
|
||||
sftp, cmd := testClient(t, READONLY, NO_DELAY)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
|
||||
f, err := sftp.Open("/dev/zero")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
b := make([]byte, 32768*100)
|
||||
go func() {
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
cmd.Process.Kill()
|
||||
}()
|
||||
for {
|
||||
_, err = f.Read(b)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sftp/issue/26 writing to a read only file caused client to loop.
|
||||
func TestClientWriteToROFile(t *testing.T) {
|
||||
sftp, cmd := testClient(t, READWRITE, NO_DELAY)
|
||||
|
@ -1375,7 +1763,7 @@ func benchmarkRead(b *testing.B, bufsize int, delay time.Duration) {
|
|||
// open sftp client
|
||||
sftp, cmd := testClient(b, READONLY, delay)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
// defer sftp.Close()
|
||||
|
||||
buf := make([]byte, bufsize)
|
||||
|
||||
|
@ -1453,7 +1841,7 @@ func benchmarkWrite(b *testing.B, bufsize int, delay time.Duration) {
|
|||
// open sftp client
|
||||
sftp, cmd := testClient(b, false, delay)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
// defer sftp.Close()
|
||||
|
||||
data := make([]byte, size)
|
||||
|
||||
|
@ -1543,6 +1931,88 @@ func BenchmarkWrite4MiBDelay150Msec(b *testing.B) {
|
|||
benchmarkWrite(b, 4*1024*1024, 150*time.Millisecond)
|
||||
}
|
||||
|
||||
func benchmarkReadFrom(b *testing.B, bufsize int, delay time.Duration) {
|
||||
size := 10*1024*1024 + 123 // ~10MiB
|
||||
|
||||
// open sftp client
|
||||
sftp, cmd := testClient(b, false, delay)
|
||||
defer cmd.Wait()
|
||||
// defer sftp.Close()
|
||||
|
||||
data := make([]byte, size)
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(int64(size))
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
f, err := ioutil.TempFile("", "sftptest")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
f2, err := sftp.Create(f.Name())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer f2.Close()
|
||||
|
||||
f2.ReadFrom(bytes.NewReader(data))
|
||||
f2.Close()
|
||||
|
||||
fi, err := os.Stat(f.Name())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if fi.Size() != int64(size) {
|
||||
b.Fatalf("wrong file size: want %d, got %d", size, fi.Size())
|
||||
}
|
||||
|
||||
os.Remove(f.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom1k(b *testing.B) {
|
||||
benchmarkReadFrom(b, 1*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom16k(b *testing.B) {
|
||||
benchmarkReadFrom(b, 16*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom32k(b *testing.B) {
|
||||
benchmarkReadFrom(b, 32*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom128k(b *testing.B) {
|
||||
benchmarkReadFrom(b, 128*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom512k(b *testing.B) {
|
||||
benchmarkReadFrom(b, 512*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom1MiB(b *testing.B) {
|
||||
benchmarkReadFrom(b, 1024*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom4MiB(b *testing.B) {
|
||||
benchmarkReadFrom(b, 4*1024*1024, NO_DELAY)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom4MiBDelay10Msec(b *testing.B) {
|
||||
benchmarkReadFrom(b, 4*1024*1024, 10*time.Millisecond)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom4MiBDelay50Msec(b *testing.B) {
|
||||
benchmarkReadFrom(b, 4*1024*1024, 50*time.Millisecond)
|
||||
}
|
||||
|
||||
func BenchmarkReadFrom4MiBDelay150Msec(b *testing.B) {
|
||||
benchmarkReadFrom(b, 4*1024*1024, 150*time.Millisecond)
|
||||
}
|
||||
|
||||
func benchmarkCopyDown(b *testing.B, fileSize int64, delay time.Duration) {
|
||||
// Create a temp file and fill it with zero's.
|
||||
src, err := ioutil.TempFile("", "sftptest")
|
||||
|
@ -1568,7 +2038,7 @@ func benchmarkCopyDown(b *testing.B, fileSize int64, delay time.Duration) {
|
|||
|
||||
sftp, cmd := testClient(b, READONLY, delay)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
// defer sftp.Close()
|
||||
b.ResetTimer()
|
||||
b.SetBytes(fileSize)
|
||||
|
||||
|
@ -1641,7 +2111,7 @@ func benchmarkCopyUp(b *testing.B, fileSize int64, delay time.Duration) {
|
|||
|
||||
sftp, cmd := testClient(b, false, delay)
|
||||
defer cmd.Wait()
|
||||
defer sftp.Close()
|
||||
// defer sftp.Close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(fileSize)
|
||||
|
|
|
@@ -116,7 +116,9 @@ func TestUnmarshalStatus(t *testing.T) {
		desc:   "missing error message and language tag",
		reqID:  1,
		status: idCode,
		want:   errShortPacket,
		want: &StatusError{
			Code: ssh_FX_FAILURE,
		},
	},
	{
		desc: "missing language tag",
17  vendor/src/github.com/pkg/sftp/conn.go  vendored

@@ -14,6 +14,8 @@ type conn struct {
	io.Reader
	io.WriteCloser
	sync.Mutex // used to serialise writes to sendPacket
	// sendPacketTest is needed to replicate packet issues in testing
	sendPacketTest func(w io.Writer, m encoding.BinaryMarshaler) error
}

func (c *conn) recvPacket() (uint8, []byte, error) {

@@ -23,6 +25,9 @@ func (c *conn) recvPacket() (uint8, []byte, error) {
func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
	c.Lock()
	defer c.Unlock()
	if c.sendPacketTest != nil {
		return c.sendPacketTest(c, m)
	}
	return sendPacket(c, m)
}

@@ -50,7 +55,11 @@ func (c *clientConn) loop() {
// recv continuously reads from the server and forwards responses to the
// appropriate channel.
func (c *clientConn) recv() error {
	defer c.conn.Close()
	defer func() {
		c.conn.Lock()
		c.conn.Close()
		c.conn.Unlock()
	}()
	for {
		typ, data, err := c.recvPacket()
		if err != nil {

@@ -93,11 +102,13 @@ func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) {
func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
	c.Lock()
	c.inflight[p.id()] = ch
	c.Unlock()
	if err := c.conn.sendPacket(p); err != nil {
		c.Lock()
		delete(c.inflight, p.id())
		c.Unlock()
		ch <- result{err: err}
	}
	c.Unlock()
}

// broadcastErr sends an error to all goroutines waiting for a response.

@@ -117,6 +128,6 @@ type serverConn struct {
	conn
}

func (s *serverConn) sendError(p id, err error) error {
func (s *serverConn) sendError(p ider, err error) error {
	return s.sendPacket(statusFromError(p, err))
}
47  vendor/src/github.com/pkg/sftp/example_test.go  vendored
|
@ -5,12 +5,16 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/sftp"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
func Example(conn *ssh.Client) {
|
||||
func Example() {
|
||||
var conn *ssh.Client
|
||||
|
||||
// open an SFTP session over an existing ssh connection.
|
||||
sftp, err := sftp.NewClient(conn)
|
||||
if err != nil {
|
||||
|
@ -88,3 +92,44 @@ func ExampleNewClientPipe() {
|
|||
// close the connection
|
||||
client.Close()
|
||||
}
|
||||
|
||||
func ExampleClient_Mkdir_parents() {
|
||||
// Example of mimicing 'mkdir --parents'; I.E. recursively create
|
||||
// directoryies and don't error if any directories already exists.
|
||||
var conn *ssh.Client
|
||||
|
||||
client, err := sftp.NewClient(conn)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
ssh_fx_failure := uint32(4)
|
||||
mkdirParents := func(client *sftp.Client, dir string) (err error) {
|
||||
var parents string
|
||||
for _, name := range strings.Split(dir, "/") {
|
||||
parents = path.Join(parents, name)
|
||||
err = client.Mkdir(parents)
|
||||
if status, ok := err.(*sftp.StatusError); ok {
|
||||
if status.Code == ssh_fx_failure {
|
||||
var fi os.FileInfo
|
||||
fi, err = client.Stat(parents)
|
||||
if err == nil {
|
||||
if !fi.IsDir() {
|
||||
return fmt.Errorf("File exists: %s", parents)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err = mkdirParents(client, "/tmp/foo/bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@ func main() {
|
|||
config := ssh.ClientConfig{
|
||||
User: *USER,
|
||||
Auth: auths,
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
}
|
||||
addr := fmt.Sprintf("%s:%d", *HOST, *PORT)
|
||||
conn, err := ssh.Dial("tcp", addr, &config)
|
||||
|
|
|
@ -42,6 +42,7 @@ func main() {
|
|||
config := ssh.ClientConfig{
|
||||
User: *USER,
|
||||
Auth: auths,
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
}
|
||||
addr := fmt.Sprintf("%s:%d", *HOST, *PORT)
|
||||
conn, err := ssh.Dial("tcp", addr, &config)
|
||||
|
|
131  vendor/src/github.com/pkg/sftp/examples/request-server/main.go  vendored  Normal file
|
@ -0,0 +1,131 @@
|
|||
// An example SFTP server implementation using the golang SSH package.
|
||||
// Serves the whole filesystem visible to the user, and has a hard-coded username and password,
|
||||
// so not for real use!
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/sftp"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// Based on example server code from golang.org/x/crypto/ssh and server_standalone
|
||||
func main() {
|
||||
|
||||
var (
|
||||
readOnly bool
|
||||
debugStderr bool
|
||||
)
|
||||
|
||||
flag.BoolVar(&readOnly, "R", false, "read-only server")
|
||||
flag.BoolVar(&debugStderr, "e", false, "debug to stderr")
|
||||
flag.Parse()
|
||||
|
||||
debugStream := ioutil.Discard
|
||||
if debugStderr {
|
||||
debugStream = os.Stderr
|
||||
}
|
||||
|
||||
// An SSH server is represented by a ServerConfig, which holds
|
||||
// certificate details and handles authentication of ServerConns.
|
||||
config := &ssh.ServerConfig{
|
||||
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
|
||||
// Should use constant-time compare (or better, salt+hash) in
|
||||
// a production setting.
|
||||
fmt.Fprintf(debugStream, "Login: %s\n", c.User())
|
||||
if c.User() == "testuser" && string(pass) == "tiger" {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("password rejected for %q", c.User())
|
||||
},
|
||||
}
|
||||
|
||||
privateBytes, err := ioutil.ReadFile("id_rsa")
|
||||
if err != nil {
|
||||
log.Fatal("Failed to load private key", err)
|
||||
}
|
||||
|
||||
private, err := ssh.ParsePrivateKey(privateBytes)
|
||||
if err != nil {
|
||||
log.Fatal("Failed to parse private key", err)
|
||||
}
|
||||
|
||||
config.AddHostKey(private)
|
||||
|
||||
// Once a ServerConfig has been configured, connections can be
|
||||
// accepted.
|
||||
listener, err := net.Listen("tcp", "0.0.0.0:2022")
|
||||
if err != nil {
|
||||
log.Fatal("failed to listen for connection", err)
|
||||
}
|
||||
fmt.Printf("Listening on %v\n", listener.Addr())
|
||||
|
||||
nConn, err := listener.Accept()
|
||||
if err != nil {
|
||||
log.Fatal("failed to accept incoming connection", err)
|
||||
}
|
||||
|
||||
// Before use, a handshake must be performed on the incoming net.Conn.
|
||||
sconn, chans, reqs, err := ssh.NewServerConn(nConn, config)
|
||||
if err != nil {
|
||||
log.Fatal("failed to handshake", err)
|
||||
}
|
||||
log.Println("login detected:", sconn.User())
|
||||
fmt.Fprintf(debugStream, "SSH server established\n")
|
||||
|
||||
// The incoming Request channel must be serviced.
|
||||
go ssh.DiscardRequests(reqs)
|
||||
|
||||
// Service the incoming Channel channel.
|
||||
for newChannel := range chans {
|
||||
// Channels have a type, depending on the application level
|
||||
// protocol intended. In the case of an SFTP session, this is "subsystem"
|
||||
// with a payload string of "<length=4>sftp"
|
||||
fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType())
|
||||
if newChannel.ChannelType() != "session" {
|
||||
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
|
||||
fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType())
|
||||
continue
|
||||
}
|
||||
channel, requests, err := newChannel.Accept()
|
||||
if err != nil {
|
||||
log.Fatal("could not accept channel.", err)
|
||||
}
|
||||
fmt.Fprintf(debugStream, "Channel accepted\n")
|
||||
|
||||
// Sessions have out-of-band requests such as "shell",
|
||||
// "pty-req" and "env". Here we handle only the
|
||||
// "subsystem" request.
|
||||
go func(in <-chan *ssh.Request) {
|
||||
for req := range in {
|
||||
fmt.Fprintf(debugStream, "Request: %v\n", req.Type)
|
||||
ok := false
|
||||
switch req.Type {
|
||||
case "subsystem":
|
||||
fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:])
|
||||
if string(req.Payload[4:]) == "sftp" {
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(debugStream, " - accepted: %v\n", ok)
|
||||
req.Reply(ok, nil)
|
||||
}
|
||||
}(requests)
|
||||
|
||||
root := sftp.InMemHandler()
|
||||
server := sftp.NewRequestServer(channel, root)
|
||||
if err := server.Serve(); err == io.EOF {
|
||||
server.Close()
|
||||
log.Print("sftp client exited session.")
|
||||
} else if err != nil {
|
||||
log.Fatal("sftp server completed with error:", err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -6,6 +6,7 @@ package main
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
|
@ -136,7 +137,10 @@ func main() {
|
|||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := server.Serve(); err != nil {
|
||||
if err := server.Serve(); err == io.EOF {
|
||||
server.Close()
|
||||
log.Print("sftp client exited session.")
|
||||
} else if err != nil {
|
||||
log.Fatal("sftp server completed with error:", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,6 +43,7 @@ func main() {
|
|||
config := ssh.ClientConfig{
|
||||
User: *USER,
|
||||
Auth: auths,
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
}
|
||||
addr := fmt.Sprintf("%s:%d", *HOST, *PORT)
|
||||
conn, err := ssh.Dial("tcp", addr, &config)
|
||||
|
|
|
@ -43,6 +43,7 @@ func main() {
|
|||
config := ssh.ClientConfig{
|
||||
User: *USER,
|
||||
Auth: auths,
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
}
|
||||
addr := fmt.Sprintf("%s:%d", *HOST, *PORT)
|
||||
conn, err := ssh.Dial("tcp", addr, &config)
|
||||
|
|
345
vendor/src/github.com/pkg/sftp/match.go
vendored
Normal file
|
@ -0,0 +1,345 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ErrBadPattern indicates a globbing pattern was malformed.
|
||||
var ErrBadPattern = errors.New("syntax error in pattern")
|
||||
|
||||
// Unix separator
|
||||
const separator = "/"
|
||||
|
||||
// Match reports whether name matches the shell file name pattern.
|
||||
// The pattern syntax is:
|
||||
//
|
||||
// pattern:
|
||||
// { term }
|
||||
// term:
|
||||
// '*' matches any sequence of non-Separator characters
|
||||
// '?' matches any single non-Separator character
|
||||
// '[' [ '^' ] { character-range } ']'
|
||||
// character class (must be non-empty)
|
||||
// c matches character c (c != '*', '?', '\\', '[')
|
||||
// '\\' c matches character c
|
||||
//
|
||||
// character-range:
|
||||
// c matches character c (c != '\\', '-', ']')
|
||||
// '\\' c matches character c
|
||||
// lo '-' hi matches character c for lo <= c <= hi
|
||||
//
|
||||
// Match requires pattern to match all of name, not just a substring.
|
||||
// The only possible returned error is ErrBadPattern, when pattern
|
||||
// is malformed.
|
||||
//
|
||||
//
|
||||
func Match(pattern, name string) (matched bool, err error) {
|
||||
Pattern:
|
||||
for len(pattern) > 0 {
|
||||
var star bool
|
||||
var chunk string
|
||||
star, chunk, pattern = scanChunk(pattern)
|
||||
if star && chunk == "" {
|
||||
// Trailing * matches rest of string unless it has a /.
|
||||
return !strings.Contains(name, separator), nil
|
||||
}
|
||||
// Look for match at current position.
|
||||
t, ok, err := matchChunk(chunk, name)
|
||||
// if we're the last chunk, make sure we've exhausted the name
|
||||
// otherwise we'll give a false result even if we could still match
|
||||
// using the star
|
||||
if ok && (len(t) == 0 || len(pattern) > 0) {
|
||||
name = t
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if star {
|
||||
// Look for match skipping i+1 bytes.
|
||||
// Cannot skip /.
|
||||
for i := 0; i < len(name) && !isPathSeparator(name[i]); i++ {
|
||||
t, ok, err := matchChunk(chunk, name[i+1:])
|
||||
if ok {
|
||||
// if we're the last chunk, make sure we exhausted the name
|
||||
if len(pattern) == 0 && len(t) > 0 {
|
||||
continue
|
||||
}
|
||||
name = t
|
||||
continue Pattern
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return len(name) == 0, nil
|
||||
}
|
||||
|
||||
// detect if byte(char) is path separator
|
||||
func isPathSeparator(c byte) bool {
|
||||
return string(c) == "/"
|
||||
}
|
||||
|
||||
// scanChunk gets the next segment of pattern, which is a non-star string
|
||||
// possibly preceded by a star.
|
||||
func scanChunk(pattern string) (star bool, chunk, rest string) {
|
||||
for len(pattern) > 0 && pattern[0] == '*' {
|
||||
pattern = pattern[1:]
|
||||
star = true
|
||||
}
|
||||
inrange := false
|
||||
var i int
|
||||
Scan:
|
||||
for i = 0; i < len(pattern); i++ {
|
||||
switch pattern[i] {
|
||||
case '\\':
|
||||
|
||||
// error check handled in matchChunk: bad pattern.
|
||||
if i+1 < len(pattern) {
|
||||
i++
|
||||
}
|
||||
case '[':
|
||||
inrange = true
|
||||
case ']':
|
||||
inrange = false
|
||||
case '*':
|
||||
if !inrange {
|
||||
break Scan
|
||||
}
|
||||
}
|
||||
}
|
||||
return star, pattern[0:i], pattern[i:]
|
||||
}
|
||||
|
||||
// matchChunk checks whether chunk matches the beginning of s.
|
||||
// If so, it returns the remainder of s (after the match).
|
||||
// Chunk is all single-character operators: literals, char classes, and ?.
|
||||
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
|
||||
for len(chunk) > 0 {
|
||||
if len(s) == 0 {
|
||||
return
|
||||
}
|
||||
switch chunk[0] {
|
||||
case '[':
|
||||
// character class
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
s = s[n:]
|
||||
chunk = chunk[1:]
|
||||
// We can't end right after '[', we're expecting at least
|
||||
// a closing bracket and possibly a caret.
|
||||
if len(chunk) == 0 {
|
||||
err = ErrBadPattern
|
||||
return
|
||||
}
|
||||
// possibly negated
|
||||
negated := chunk[0] == '^'
|
||||
if negated {
|
||||
chunk = chunk[1:]
|
||||
}
|
||||
// parse all ranges
|
||||
match := false
|
||||
nrange := 0
|
||||
for {
|
||||
if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
|
||||
chunk = chunk[1:]
|
||||
break
|
||||
}
|
||||
var lo, hi rune
|
||||
if lo, chunk, err = getEsc(chunk); err != nil {
|
||||
return
|
||||
}
|
||||
hi = lo
|
||||
if chunk[0] == '-' {
|
||||
if hi, chunk, err = getEsc(chunk[1:]); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if lo <= r && r <= hi {
|
||||
match = true
|
||||
}
|
||||
nrange++
|
||||
}
|
||||
if match == negated {
|
||||
return
|
||||
}
|
||||
|
||||
case '?':
|
||||
if isPathSeparator(s[0]) {
|
||||
return
|
||||
}
|
||||
_, n := utf8.DecodeRuneInString(s)
|
||||
s = s[n:]
|
||||
chunk = chunk[1:]
|
||||
|
||||
case '\\':
|
||||
chunk = chunk[1:]
|
||||
if len(chunk) == 0 {
|
||||
err = ErrBadPattern
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
|
||||
default:
|
||||
if chunk[0] != s[0] {
|
||||
return
|
||||
}
|
||||
s = s[1:]
|
||||
chunk = chunk[1:]
|
||||
}
|
||||
}
|
||||
return s, true, nil
|
||||
}
|
||||
|
||||
// getEsc gets a possibly-escaped character from chunk, for a character class.
|
||||
func getEsc(chunk string) (r rune, nchunk string, err error) {
|
||||
if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
|
||||
err = ErrBadPattern
|
||||
return
|
||||
}
|
||||
if chunk[0] == '\\' {
|
||||
chunk = chunk[1:]
|
||||
if len(chunk) == 0 {
|
||||
err = ErrBadPattern
|
||||
return
|
||||
}
|
||||
}
|
||||
r, n := utf8.DecodeRuneInString(chunk)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
err = ErrBadPattern
|
||||
}
|
||||
nchunk = chunk[n:]
|
||||
if len(nchunk) == 0 {
|
||||
err = ErrBadPattern
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Split splits path immediately following the final Separator,
|
||||
// separating it into a directory and file name component.
|
||||
// If there is no Separator in path, Split returns an empty dir
|
||||
// and file set to path.
|
||||
// The returned values have the property that path = dir+file.
|
||||
func Split(path string) (dir, file string) {
|
||||
i := len(path) - 1
|
||||
for i >= 0 && !isPathSeparator(path[i]) {
|
||||
i--
|
||||
}
|
||||
return path[:i+1], path[i+1:]
|
||||
}
|
||||
|
||||
// Glob returns the names of all files matching pattern or nil
|
||||
// if there is no matching file. The syntax of patterns is the same
|
||||
// as in Match. The pattern may describe hierarchical names such as
|
||||
// /usr/*/bin/ed (assuming the Separator is '/').
|
||||
//
|
||||
// Glob ignores file system errors such as I/O errors reading directories.
|
||||
// The only possible returned error is ErrBadPattern, when pattern
|
||||
// is malformed.
|
||||
func (c *Client) Glob(pattern string) (matches []string, err error) {
|
||||
if !hasMeta(pattern) {
|
||||
file, err := c.Lstat(pattern)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
dir, _ := Split(pattern)
|
||||
dir = cleanGlobPath(dir)
|
||||
return []string{Join(dir, file.Name())}, nil
|
||||
}
|
||||
|
||||
dir, file := Split(pattern)
|
||||
dir = cleanGlobPath(dir)
|
||||
|
||||
if !hasMeta(dir) {
|
||||
return c.glob(dir, file, nil)
|
||||
}
|
||||
|
||||
// Prevent infinite recursion. See issue 15879.
|
||||
if dir == pattern {
|
||||
return nil, ErrBadPattern
|
||||
}
|
||||
|
||||
var m []string
|
||||
m, err = c.Glob(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, d := range m {
|
||||
matches, err = c.glob(d, file, matches)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// cleanGlobPath prepares path for glob matching.
|
||||
func cleanGlobPath(path string) string {
|
||||
switch path {
|
||||
case "":
|
||||
return "."
|
||||
case string(separator):
|
||||
// do nothing to the path
|
||||
return path
|
||||
default:
|
||||
return path[0 : len(path)-1] // chop off trailing separator
|
||||
}
|
||||
}
|
||||
|
||||
// glob searches for files matching pattern in the directory dir
|
||||
// and appends them to matches. If the directory cannot be
|
||||
// opened, it returns the existing matches. New matches are
|
||||
// added in lexicographical order.
|
||||
func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) {
|
||||
m = matches
|
||||
fi, err := c.Stat(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return
|
||||
}
|
||||
names, err := c.ReadDir(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
//sort.Strings(names)
|
||||
|
||||
for _, n := range names {
|
||||
matched, err := Match(pattern, n.Name())
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
if matched {
|
||||
m = append(m, Join(dir, n.Name()))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Join joins any number of path elements into a single path, adding
|
||||
// a Separator if necessary.
|
||||
// all empty strings are ignored.
|
||||
func Join(elem ...string) string {
|
||||
return join(elem)
|
||||
}
|
||||
func join(elem []string) string {
|
||||
// If there's a bug here, fix the logic in ./path_plan9.go too.
|
||||
for i, e := range elem {
|
||||
if e != "" {
|
||||
return strings.Join(elem[i:], string(separator))
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// hasMeta reports whether path contains any of the magic characters
|
||||
// recognized by Match.
|
||||
func hasMeta(path string) bool {
|
||||
// TODO(niemeyer): Should other magic characters be added here?
|
||||
return strings.ContainsAny(path, "*?[")
|
||||
}
|
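For reference, a short usage sketch of the Match and Glob helpers added above (this sketch is not part of the vendored diff; the pattern strings are arbitrary and the *sftp.Client is assumed to be already connected):

```go
package example

import (
	"fmt"

	"github.com/pkg/sftp"
)

// globExample exercises the pattern matching added in match.go.
func globExample(client *sftp.Client) error {
	// Match checks a single name against a pattern; '*' and '?' never
	// cross the '/' separator, and the only possible error is
	// sftp.ErrBadPattern.
	ok, err := sftp.Match("/var/log/*.log", "/var/log/syslog.log")
	if err != nil {
		return err
	}
	fmt.Println(ok) // true

	// Glob expands the pattern against the remote filesystem, ignoring
	// I/O errors while reading directories.
	matches, err := client.Glob("/var/log/*.log")
	if err != nil {
		return err
	}
	for _, name := range matches {
		fmt.Println(name)
	}
	return nil
}
```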
156
vendor/src/github.com/pkg/sftp/packet-manager.go
vendored
Normal file
|
@ -0,0 +1,156 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// The goal of the packetManager is to keep the outgoing packets in the same
|
||||
// order as the incoming. This is due to some sftp clients requiring this
|
||||
// behavior (e.g. WinSCP).
|
||||
|
||||
type packetSender interface {
|
||||
sendPacket(encoding.BinaryMarshaler) error
|
||||
}
|
||||
|
||||
type packetManager struct {
|
||||
requests chan requestPacket
|
||||
responses chan responsePacket
|
||||
fini chan struct{}
|
||||
incoming requestPacketIDs
|
||||
outgoing responsePackets
|
||||
sender packetSender // connection object
|
||||
working *sync.WaitGroup
|
||||
}
|
||||
|
||||
func newPktMgr(sender packetSender) packetManager {
|
||||
s := packetManager{
|
||||
requests: make(chan requestPacket, sftpServerWorkerCount),
|
||||
responses: make(chan responsePacket, sftpServerWorkerCount),
|
||||
fini: make(chan struct{}),
|
||||
incoming: make([]uint32, 0, sftpServerWorkerCount),
|
||||
outgoing: make([]responsePacket, 0, sftpServerWorkerCount),
|
||||
sender: sender,
|
||||
working: &sync.WaitGroup{},
|
||||
}
|
||||
go s.controller()
|
||||
return s
|
||||
}
|
||||
|
||||
// register incoming packets to be handled
|
||||
// send id of 0 for packets without id
|
||||
func (s packetManager) incomingPacket(pkt requestPacket) {
|
||||
s.working.Add(1)
|
||||
s.requests <- pkt // buffer == sftpServerWorkerCount
|
||||
}
|
||||
|
||||
// register outgoing packets as being ready
|
||||
func (s packetManager) readyPacket(pkt responsePacket) {
|
||||
s.responses <- pkt
|
||||
s.working.Done()
|
||||
}
|
||||
|
||||
// shut down packetManager controller
|
||||
func (s packetManager) close() {
|
||||
// pause until current packets are processed
|
||||
s.working.Wait()
|
||||
close(s.fini)
|
||||
}
|
||||
|
||||
// Passed a worker function, returns a channel for incoming packets.
|
||||
// The goal is to process packets in the order they are received as is
|
||||
// required by section 7 of the RFC, while maximizing throughput of file
|
||||
// transfers.
|
||||
func (s *packetManager) workerChan(runWorker func(requestChan)) requestChan {
|
||||
|
||||
rwChan := make(chan requestPacket, sftpServerWorkerCount)
|
||||
for i := 0; i < sftpServerWorkerCount; i++ {
|
||||
runWorker(rwChan)
|
||||
}
|
||||
|
||||
cmdChan := make(chan requestPacket)
|
||||
runWorker(cmdChan)
|
||||
|
||||
pktChan := make(chan requestPacket, sftpServerWorkerCount)
|
||||
go func() {
|
||||
// start with cmdChan
|
||||
curChan := cmdChan
|
||||
for pkt := range pktChan {
|
||||
// on file open packet, switch to rwChan
|
||||
switch pkt.(type) {
|
||||
case *sshFxpOpenPacket:
|
||||
curChan = rwChan
|
||||
// on file close packet, switch back to cmdChan
|
||||
// after waiting for any reads/writes to finish
|
||||
case *sshFxpClosePacket:
|
||||
// wait for rwChan to finish
|
||||
s.working.Wait()
|
||||
// stop using rwChan
|
||||
curChan = cmdChan
|
||||
}
|
||||
s.incomingPacket(pkt)
|
||||
curChan <- pkt
|
||||
}
|
||||
close(rwChan)
|
||||
close(cmdChan)
|
||||
s.close()
|
||||
}()
|
||||
|
||||
return pktChan
|
||||
}
|
||||
|
||||
// process packets
|
||||
func (s *packetManager) controller() {
|
||||
for {
|
||||
select {
|
||||
case pkt := <-s.requests:
|
||||
debug("incoming id: %v", pkt.id())
|
||||
s.incoming = append(s.incoming, pkt.id())
|
||||
if len(s.incoming) > 1 {
|
||||
s.incoming.Sort()
|
||||
}
|
||||
case pkt := <-s.responses:
|
||||
debug("outgoing pkt: %v", pkt.id())
|
||||
s.outgoing = append(s.outgoing, pkt)
|
||||
if len(s.outgoing) > 1 {
|
||||
s.outgoing.Sort()
|
||||
}
|
||||
case <-s.fini:
|
||||
return
|
||||
}
|
||||
s.maybeSendPackets()
|
||||
}
|
||||
}
|
||||
|
||||
// send as many packets as are ready
|
||||
func (s *packetManager) maybeSendPackets() {
|
||||
for {
|
||||
if len(s.outgoing) == 0 || len(s.incoming) == 0 {
|
||||
debug("break! -- outgoing: %v; incoming: %v",
|
||||
len(s.outgoing), len(s.incoming))
|
||||
break
|
||||
}
|
||||
out := s.outgoing[0]
|
||||
in := s.incoming[0]
|
||||
// debug("incoming: %v", s.incoming)
|
||||
// debug("outgoing: %v", outfilter(s.outgoing))
|
||||
if in == out.id() {
|
||||
s.sender.sendPacket(out)
|
||||
// pop off heads
|
||||
copy(s.incoming, s.incoming[1:]) // shift left
|
||||
s.incoming = s.incoming[:len(s.incoming)-1] // remove last
|
||||
copy(s.outgoing, s.outgoing[1:]) // shift left
|
||||
s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func outfilter(o []responsePacket) []uint32 {
|
||||
res := make([]uint32, 0, len(o))
|
||||
for _, v := range o {
|
||||
res = append(res, v.id())
|
||||
}
|
||||
return res
|
||||
}
|
21
vendor/src/github.com/pkg/sftp/packet-manager_go1.8.go
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
// +build go1.8
|
||||
|
||||
package sftp
|
||||
|
||||
import "sort"
|
||||
|
||||
type responsePackets []responsePacket
|
||||
|
||||
func (r responsePackets) Sort() {
|
||||
sort.Slice(r, func(i, j int) bool {
|
||||
return r[i].id() < r[j].id()
|
||||
})
|
||||
}
|
||||
|
||||
type requestPacketIDs []uint32
|
||||
|
||||
func (r requestPacketIDs) Sort() {
|
||||
sort.Slice(r, func(i, j int) bool {
|
||||
return r[i] < r[j]
|
||||
})
|
||||
}
|
21
vendor/src/github.com/pkg/sftp/packet-manager_legacy.go
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
// +build !go1.8
|
||||
|
||||
package sftp
|
||||
|
||||
import "sort"
|
||||
|
||||
// for sorting/ordering outgoing
|
||||
type responsePackets []responsePacket
|
||||
|
||||
func (r responsePackets) Len() int { return len(r) }
|
||||
func (r responsePackets) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r responsePackets) Less(i, j int) bool { return r[i].id() < r[j].id() }
|
||||
func (r responsePackets) Sort() { sort.Sort(r) }
|
||||
|
||||
// for sorting/ordering incoming
|
||||
type requestPacketIDs []uint32
|
||||
|
||||
func (r requestPacketIDs) Len() int { return len(r) }
|
||||
func (r requestPacketIDs) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r requestPacketIDs) Less(i, j int) bool { return r[i] < r[j] }
|
||||
func (r requestPacketIDs) Sort() { sort.Sort(r) }
|
140
vendor/src/github.com/pkg/sftp/packet-manager_test.go
vendored
Normal file
|
@ -0,0 +1,140 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type _testSender struct {
|
||||
sent chan encoding.BinaryMarshaler
|
||||
}
|
||||
|
||||
func newTestSender() *_testSender {
|
||||
return &_testSender{make(chan encoding.BinaryMarshaler)}
|
||||
}
|
||||
|
||||
func (s _testSender) sendPacket(p encoding.BinaryMarshaler) error {
|
||||
s.sent <- p
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakepacket uint32
|
||||
|
||||
func (fakepacket) MarshalBinary() ([]byte, error) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
func (fakepacket) UnmarshalBinary([]byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f fakepacket) id() uint32 {
|
||||
return uint32(f)
|
||||
}
|
||||
|
||||
type pair struct {
|
||||
in fakepacket
|
||||
out fakepacket
|
||||
}
|
||||
|
||||
// basic test
|
||||
var ttable1 = []pair{
|
||||
pair{fakepacket(0), fakepacket(0)},
|
||||
pair{fakepacket(1), fakepacket(1)},
|
||||
pair{fakepacket(2), fakepacket(2)},
|
||||
pair{fakepacket(3), fakepacket(3)},
|
||||
}
|
||||
|
||||
// outgoing packets out of order
|
||||
var ttable2 = []pair{
|
||||
pair{fakepacket(0), fakepacket(0)},
|
||||
pair{fakepacket(1), fakepacket(4)},
|
||||
pair{fakepacket(2), fakepacket(1)},
|
||||
pair{fakepacket(3), fakepacket(3)},
|
||||
pair{fakepacket(4), fakepacket(2)},
|
||||
}
|
||||
|
||||
// incoming packets out of order
|
||||
var ttable3 = []pair{
|
||||
pair{fakepacket(2), fakepacket(0)},
|
||||
pair{fakepacket(1), fakepacket(1)},
|
||||
pair{fakepacket(3), fakepacket(2)},
|
||||
pair{fakepacket(0), fakepacket(3)},
|
||||
}
|
||||
|
||||
var tables = [][]pair{ttable1, ttable2, ttable3}
|
||||
|
||||
func TestPacketManager(t *testing.T) {
|
||||
sender := newTestSender()
|
||||
s := newPktMgr(sender)
|
||||
|
||||
for i := range tables {
|
||||
table := tables[i]
|
||||
for _, p := range table {
|
||||
s.incomingPacket(p.in)
|
||||
}
|
||||
for _, p := range table {
|
||||
s.readyPacket(p.out)
|
||||
}
|
||||
for i := 0; i < len(table); i++ {
|
||||
pkt := <-sender.sent
|
||||
id := pkt.(fakepacket).id()
|
||||
assert.Equal(t, id, uint32(i))
|
||||
}
|
||||
}
|
||||
s.close()
|
||||
}
|
||||
|
||||
// Test what happens when the pool processes a close packet on a file that it
|
||||
// is still reading from.
|
||||
func TestCloseOutOfOrder(t *testing.T) {
|
||||
packets := []requestPacket{
|
||||
&sshFxpRemovePacket{ID: 0, Filename: "foo"},
|
||||
&sshFxpOpenPacket{ID: 1},
|
||||
&sshFxpWritePacket{ID: 2, Handle: "foo"},
|
||||
&sshFxpWritePacket{ID: 3, Handle: "foo"},
|
||||
&sshFxpWritePacket{ID: 4, Handle: "foo"},
|
||||
&sshFxpWritePacket{ID: 5, Handle: "foo"},
|
||||
&sshFxpClosePacket{ID: 6, Handle: "foo"},
|
||||
&sshFxpRemovePacket{ID: 7, Filename: "foo"},
|
||||
}
|
||||
|
||||
recvChan := make(chan requestPacket, len(packets)+1)
|
||||
sender := newTestSender()
|
||||
pktMgr := newPktMgr(sender)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(packets))
|
||||
runWorker := func(ch requestChan) {
|
||||
go func() {
|
||||
for pkt := range ch {
|
||||
if _, ok := pkt.(*sshFxpWritePacket); ok {
|
||||
// sleep to cause writes to come after close/remove
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
pktMgr.working.Done()
|
||||
recvChan <- pkt
|
||||
wg.Done()
|
||||
}
|
||||
}()
|
||||
}
|
||||
pktChan := pktMgr.workerChan(runWorker)
|
||||
for _, p := range packets {
|
||||
pktChan <- p
|
||||
}
|
||||
wg.Wait()
|
||||
close(recvChan)
|
||||
received := []requestPacket{}
|
||||
for p := range recvChan {
|
||||
received = append(received, p)
|
||||
}
|
||||
if received[len(received)-2].id() != packets[len(packets)-2].id() {
|
||||
t.Fatal("Packets processed out of order1:", received, packets)
|
||||
}
|
||||
if received[len(received)-1].id() != packets[len(packets)-1].id() {
|
||||
t.Fatal("Packets processed out of order2:", received, packets)
|
||||
}
|
||||
}
|
141
vendor/src/github.com/pkg/sftp/packet-typing.go
vendored
Normal file
|
@ -0,0 +1,141 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// all incoming packets
|
||||
type requestPacket interface {
|
||||
encoding.BinaryUnmarshaler
|
||||
id() uint32
|
||||
}
|
||||
|
||||
type requestChan chan requestPacket
|
||||
|
||||
type responsePacket interface {
|
||||
encoding.BinaryMarshaler
|
||||
id() uint32
|
||||
}
|
||||
|
||||
// interfaces to group types
|
||||
type hasPath interface {
|
||||
requestPacket
|
||||
getPath() string
|
||||
}
|
||||
|
||||
type hasHandle interface {
|
||||
requestPacket
|
||||
getHandle() string
|
||||
}
|
||||
|
||||
type isOpener interface {
|
||||
hasPath
|
||||
isOpener()
|
||||
}
|
||||
|
||||
type notReadOnly interface {
|
||||
notReadOnly()
|
||||
}
|
||||
|
||||
//// define types by adding methods
|
||||
// hasPath
|
||||
func (p sshFxpLstatPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpStatPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpRmdirPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpReadlinkPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpRealpathPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpMkdirPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpSetstatPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpStatvfsPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpRemovePacket) getPath() string { return p.Filename }
|
||||
func (p sshFxpRenamePacket) getPath() string { return p.Oldpath }
|
||||
func (p sshFxpSymlinkPacket) getPath() string { return p.Targetpath }
|
||||
|
||||
// Openers implement hasPath and isOpener
|
||||
func (p sshFxpOpendirPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpOpendirPacket) isOpener() {}
|
||||
func (p sshFxpOpenPacket) getPath() string { return p.Path }
|
||||
func (p sshFxpOpenPacket) isOpener() {}
|
||||
|
||||
// hasHandle
|
||||
func (p sshFxpFstatPacket) getHandle() string { return p.Handle }
|
||||
func (p sshFxpFsetstatPacket) getHandle() string { return p.Handle }
|
||||
func (p sshFxpReadPacket) getHandle() string { return p.Handle }
|
||||
func (p sshFxpWritePacket) getHandle() string { return p.Handle }
|
||||
func (p sshFxpReaddirPacket) getHandle() string { return p.Handle }
|
||||
|
||||
// notReadOnly
|
||||
func (p sshFxpWritePacket) notReadOnly() {}
|
||||
func (p sshFxpSetstatPacket) notReadOnly() {}
|
||||
func (p sshFxpFsetstatPacket) notReadOnly() {}
|
||||
func (p sshFxpRemovePacket) notReadOnly() {}
|
||||
func (p sshFxpMkdirPacket) notReadOnly() {}
|
||||
func (p sshFxpRmdirPacket) notReadOnly() {}
|
||||
func (p sshFxpRenamePacket) notReadOnly() {}
|
||||
func (p sshFxpSymlinkPacket) notReadOnly() {}
|
||||
|
||||
// this has a handle, but is only used for close
|
||||
func (p sshFxpClosePacket) getHandle() string { return p.Handle }
|
||||
|
||||
// some packets with ID are missing id()
|
||||
func (p sshFxpDataPacket) id() uint32 { return p.ID }
|
||||
func (p sshFxpStatusPacket) id() uint32 { return p.ID }
|
||||
func (p sshFxpStatResponse) id() uint32 { return p.ID }
|
||||
func (p sshFxpNamePacket) id() uint32 { return p.ID }
|
||||
func (p sshFxpHandlePacket) id() uint32 { return p.ID }
|
||||
func (p sshFxVersionPacket) id() uint32 { return 0 }
|
||||
|
||||
// take raw incoming packet data and build packet objects
|
||||
func makePacket(p rxPacket) (requestPacket, error) {
|
||||
var pkt requestPacket
|
||||
switch p.pktType {
|
||||
case ssh_FXP_INIT:
|
||||
pkt = &sshFxInitPacket{}
|
||||
case ssh_FXP_LSTAT:
|
||||
pkt = &sshFxpLstatPacket{}
|
||||
case ssh_FXP_OPEN:
|
||||
pkt = &sshFxpOpenPacket{}
|
||||
case ssh_FXP_CLOSE:
|
||||
pkt = &sshFxpClosePacket{}
|
||||
case ssh_FXP_READ:
|
||||
pkt = &sshFxpReadPacket{}
|
||||
case ssh_FXP_WRITE:
|
||||
pkt = &sshFxpWritePacket{}
|
||||
case ssh_FXP_FSTAT:
|
||||
pkt = &sshFxpFstatPacket{}
|
||||
case ssh_FXP_SETSTAT:
|
||||
pkt = &sshFxpSetstatPacket{}
|
||||
case ssh_FXP_FSETSTAT:
|
||||
pkt = &sshFxpFsetstatPacket{}
|
||||
case ssh_FXP_OPENDIR:
|
||||
pkt = &sshFxpOpendirPacket{}
|
||||
case ssh_FXP_READDIR:
|
||||
pkt = &sshFxpReaddirPacket{}
|
||||
case ssh_FXP_REMOVE:
|
||||
pkt = &sshFxpRemovePacket{}
|
||||
case ssh_FXP_MKDIR:
|
||||
pkt = &sshFxpMkdirPacket{}
|
||||
case ssh_FXP_RMDIR:
|
||||
pkt = &sshFxpRmdirPacket{}
|
||||
case ssh_FXP_REALPATH:
|
||||
pkt = &sshFxpRealpathPacket{}
|
||||
case ssh_FXP_STAT:
|
||||
pkt = &sshFxpStatPacket{}
|
||||
case ssh_FXP_RENAME:
|
||||
pkt = &sshFxpRenamePacket{}
|
||||
case ssh_FXP_READLINK:
|
||||
pkt = &sshFxpReadlinkPacket{}
|
||||
case ssh_FXP_SYMLINK:
|
||||
pkt = &sshFxpSymlinkPacket{}
|
||||
case ssh_FXP_EXTENDED:
|
||||
pkt = &sshFxpExtendedPacket{}
|
||||
default:
|
||||
return nil, errors.Errorf("unhandled packet type: %s", p.pktType)
|
||||
}
|
||||
if err := pkt.UnmarshalBinary(p.pktBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pkt, nil
|
||||
}
|
3
vendor/src/github.com/pkg/sftp/packet.go
vendored
|
@ -170,9 +170,6 @@ func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) {
|
|||
return ep, b, err
|
||||
}
|
||||
ep.Data, b, err = unmarshalStringSafe(b)
|
||||
if err != nil {
|
||||
return ep, b, err
|
||||
}
|
||||
return ep, b, err
|
||||
}
|
||||
|
||||
|
|
244
vendor/src/github.com/pkg/sftp/request-example.go
vendored
Normal file
|
@ -0,0 +1,244 @@
|
|||
package sftp
|
||||
|
||||
// This serves as an example of how to implement the request server handler as
|
||||
// well as a dummy backend for testing. It implements an in-memory backend that
|
||||
// works as a very simple filesystem with a flat key-value lookup system.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// InMemHandler returns a Handlers object with the test handlers
|
||||
func InMemHandler() Handlers {
|
||||
root := &root{
|
||||
files: make(map[string]*memFile),
|
||||
}
|
||||
root.memFile = newMemFile("/", true)
|
||||
return Handlers{root, root, root, root}
|
||||
}
|
||||
|
||||
// Handlers
|
||||
func (fs *root) Fileread(r Request) (io.ReaderAt, error) {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
file, err := fs.fetch(r.Filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if file.symlink != "" {
|
||||
file, err = fs.fetch(file.symlink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return file.ReaderAt()
|
||||
}
|
||||
|
||||
func (fs *root) Filewrite(r Request) (io.WriterAt, error) {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
file, err := fs.fetch(r.Filepath)
|
||||
if err == os.ErrNotExist {
|
||||
dir, err := fs.fetch(filepath.Dir(r.Filepath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !dir.isdir {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
file = newMemFile(r.Filepath, false)
|
||||
fs.files[r.Filepath] = file
|
||||
}
|
||||
return file.WriterAt()
|
||||
}
|
||||
|
||||
func (fs *root) Filecmd(r Request) error {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
switch r.Method {
|
||||
case "Setstat":
|
||||
return nil
|
||||
case "Rename":
|
||||
file, err := fs.fetch(r.Filepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := fs.files[r.Target]; ok {
|
||||
return &os.LinkError{Op: "rename", Old: r.Filepath, New: r.Target,
|
||||
Err: fmt.Errorf("dest file exists")}
|
||||
}
|
||||
fs.files[r.Target] = file
|
||||
delete(fs.files, r.Filepath)
|
||||
case "Rmdir", "Remove":
|
||||
_, err := fs.fetch(filepath.Dir(r.Filepath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(fs.files, r.Filepath)
|
||||
case "Mkdir":
|
||||
_, err := fs.fetch(filepath.Dir(r.Filepath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.files[r.Filepath] = newMemFile(r.Filepath, true)
|
||||
case "Symlink":
|
||||
_, err := fs.fetch(r.Filepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
link := newMemFile(r.Target, false)
|
||||
link.symlink = r.Filepath
|
||||
fs.files[r.Target] = link
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *root) Fileinfo(r Request) ([]os.FileInfo, error) {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
switch r.Method {
|
||||
case "List":
|
||||
var err error
|
||||
batch_size := 10
|
||||
current_offset := 0
|
||||
if token := r.LsNext(); token != "" {
|
||||
current_offset, err = strconv.Atoi(token)
|
||||
if err != nil {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
}
|
||||
ordered_names := []string{}
|
||||
for fn := range fs.files {
|
||||
if filepath.Dir(fn) == r.Filepath {
|
||||
ordered_names = append(ordered_names, fn)
|
||||
}
|
||||
}
|
||||
sort.Sort(sort.StringSlice(ordered_names))
|
||||
list := make([]os.FileInfo, len(ordered_names))
|
||||
for i, fn := range ordered_names {
|
||||
list[i] = fs.files[fn]
|
||||
}
|
||||
if len(list) < current_offset {
|
||||
return nil, io.EOF
|
||||
}
|
||||
new_offset := current_offset + batch_size
|
||||
if new_offset > len(list) {
|
||||
new_offset = len(list)
|
||||
}
|
||||
r.LsSave(strconv.Itoa(new_offset))
|
||||
return list[current_offset:new_offset], nil
|
||||
case "Stat":
|
||||
file, err := fs.fetch(r.Filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []os.FileInfo{file}, nil
|
||||
case "Readlink":
|
||||
file, err := fs.fetch(r.Filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if file.symlink != "" {
|
||||
file, err = fs.fetch(file.symlink)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return []os.FileInfo{file}, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// In-memory file-system-y thing that the Handlers live on
|
||||
type root struct {
|
||||
*memFile
|
||||
files map[string]*memFile
|
||||
filesLock sync.Mutex
|
||||
}
|
||||
|
||||
func (fs *root) fetch(path string) (*memFile, error) {
|
||||
if path == "/" {
|
||||
return fs.memFile, nil
|
||||
}
|
||||
if file, ok := fs.files[path]; ok {
|
||||
return file, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// Implements os.FileInfo, Reader and Writer interfaces.
|
||||
// These are the 3 interfaces necessary for the Handlers.
|
||||
type memFile struct {
|
||||
name string
|
||||
modtime time.Time
|
||||
symlink string
|
||||
isdir bool
|
||||
content []byte
|
||||
contentLock sync.RWMutex
|
||||
}
|
||||
|
||||
// factory to make sure modtime is set
|
||||
func newMemFile(name string, isdir bool) *memFile {
|
||||
return &memFile{
|
||||
name: name,
|
||||
modtime: time.Now(),
|
||||
isdir: isdir,
|
||||
}
|
||||
}
|
||||
|
||||
// Have memFile fulfill os.FileInfo interface
|
||||
func (f *memFile) Name() string { return filepath.Base(f.name) }
|
||||
func (f *memFile) Size() int64 { return int64(len(f.content)) }
|
||||
func (f *memFile) Mode() os.FileMode {
|
||||
ret := os.FileMode(0644)
|
||||
if f.isdir {
|
||||
ret = os.FileMode(0755) | os.ModeDir
|
||||
}
|
||||
if f.symlink != "" {
|
||||
ret = os.FileMode(0777) | os.ModeSymlink
|
||||
}
|
||||
return ret
|
||||
}
|
||||
func (f *memFile) ModTime() time.Time { return f.modtime }
|
||||
func (f *memFile) IsDir() bool { return f.isdir }
|
||||
func (f *memFile) Sys() interface{} {
|
||||
return fakeFileInfoSys()
|
||||
}
|
||||
|
||||
// Read/Write
|
||||
func (f *memFile) ReaderAt() (io.ReaderAt, error) {
|
||||
if f.isdir {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
return bytes.NewReader(f.content), nil
|
||||
}
|
||||
|
||||
func (f *memFile) WriterAt() (io.WriterAt, error) {
|
||||
if f.isdir {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
func (f *memFile) WriteAt(p []byte, off int64) (int, error) {
|
||||
// fmt.Println(string(p), off)
|
||||
// mimic write delays, should be optional
|
||||
time.Sleep(time.Microsecond * time.Duration(len(p)))
|
||||
f.contentLock.Lock()
|
||||
defer f.contentLock.Unlock()
|
||||
plen := len(p) + int(off)
|
||||
if plen >= len(f.content) {
|
||||
nc := make([]byte, plen)
|
||||
copy(nc, f.content)
|
||||
f.content = nc
|
||||
}
|
||||
copy(f.content[off:], p)
|
||||
return len(p), nil
|
||||
}
|
30
vendor/src/github.com/pkg/sftp/request-interfaces.go
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Interfaces are differentiated based on required returned values.
|
||||
// All input arguments are to be pulled from Request (the only arg).
|
||||
|
||||
// FileReader should return an io.ReaderAt for the filepath
|
||||
type FileReader interface {
|
||||
Fileread(Request) (io.ReaderAt, error)
|
||||
}
|
||||
|
||||
// FileWriter should return an io.WriterAt for the filepath
|
||||
type FileWriter interface {
|
||||
Filewrite(Request) (io.WriterAt, error)
|
||||
}
|
||||
|
||||
// FileCmder should return an error (rename, remove, setstat, etc.)
|
||||
type FileCmder interface {
|
||||
Filecmd(Request) error
|
||||
}
|
||||
|
||||
// FileInfoer should return file listing info and errors (readdir, stat)
|
||||
// note stat requests would return a list of 1
|
||||
type FileInfoer interface {
|
||||
Fileinfo(Request) ([]os.FileInfo, error)
|
||||
}
|
48
vendor/src/github.com/pkg/sftp/request-readme.md
vendored
Normal file
|
@ -0,0 +1,48 @@
|
|||
# Request Based SFTP API
|
||||
|
||||
The request based API allows for custom backends in a way similar to the http
|
||||
package. In order to create a backend you need to implement 4 handler
|
||||
interfaces; one for reading, one for writing, one for misc commands and one for
|
||||
listing files. Each has 1 required method and in each case those methods take
|
||||
the Request as the only parameter and they each return something different.
|
||||
These 4 interfaces are enough to handle all the SFTP traffic in a simplified
|
||||
manner.
|
||||
|
||||
The Request structure has 5 public fields which you will deal with.
|
||||
|
||||
- Method (string) - string name of incoming call
|
||||
- Filepath (string) - path of file to act on
- Flags (uint32) - file flags from the request packet
|
||||
- Attrs ([]byte) - byte string of file attribute data
|
||||
- Target (string) - target path for renames and sym-links
|
||||
|
||||
Below are the methods and a brief description of what they need to do.
|
||||
|
||||
### Fileread(Request) (io.ReaderAt, error)
|
||||
|
||||
Handler for "Get" method and returns an io.Reader for the file which the server
|
||||
then sends to the client.
|
||||
|
||||
### Filewrite(Request) (io.WriterAt, error)
|
||||
|
||||
Handler for "Put" method and returns an io.Writer for the file which the server
|
||||
then writes the uploaded file to.
|
||||
|
||||
### Filecmd(Request) error
|
||||
|
||||
Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods. Makes the
|
||||
appropriate changes and returns nil for success or a filesystem-like error
|
||||
(e.g. os.ErrNotExist).
|
||||
|
||||
### Fileinfo(Request) ([]os.FileInfo, error)
|
||||
|
||||
Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs
|
||||
with the data on the files and returns in a list (list of 1 for Stat and
|
||||
Readlink).
|
||||
|
||||
|
||||
## TODO
|
||||
|
||||
- Add support for API users to see trace/debugging info of what is going on
|
||||
inside SFTP server.
|
||||
- Consider adding support for SFTP file append only mode.
|
||||
|
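As a rough illustration of the README above (not part of the vendored diff), here is a minimal sketch of a custom backend, assuming the interface signatures from request-interfaces.go and the Handlers/NewRequestServer API from request-server.go; the fixedFS type, the /hello.txt path and the serveChannel helper are invented for the example:

```go
package example

import (
	"bytes"
	"io"
	"os"

	"github.com/pkg/sftp"
)

// fixedFS is a hypothetical read-only backend exposing one in-memory file.
type fixedFS struct {
	data []byte
}

// Fileread backs the "Get" method: return an io.ReaderAt for the path.
func (fs *fixedFS) Fileread(r sftp.Request) (io.ReaderAt, error) {
	if r.Filepath != "/hello.txt" {
		return nil, os.ErrNotExist
	}
	return bytes.NewReader(fs.data), nil
}

// Filewrite rejects "Put" requests since this backend is read-only.
func (fs *fixedFS) Filewrite(r sftp.Request) (io.WriterAt, error) {
	return nil, os.ErrInvalid
}

// Filecmd rejects Rename/Remove/Mkdir/... for the same reason.
func (fs *fixedFS) Filecmd(r sftp.Request) error {
	return os.ErrInvalid
}

// Fileinfo would back "List", "Stat" and "Readlink"; elided here.
func (fs *fixedFS) Fileinfo(r sftp.Request) ([]os.FileInfo, error) {
	return nil, os.ErrNotExist
}

// serveChannel wires the backend into a RequestServer over any
// io.ReadWriteCloser, normally an SSH "sftp" subsystem channel.
func serveChannel(rwc io.ReadWriteCloser) error {
	fs := &fixedFS{data: []byte("hello world\n")}
	handlers := sftp.Handlers{FileGet: fs, FilePut: fs, FileCmd: fs, FileInfo: fs}
	return sftp.NewRequestServer(rwc, handlers).Serve()
}
```

Serve returns when the client disconnects (typically with io.EOF), mirroring the handling in the request-server example earlier in this diff.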
231
vendor/src/github.com/pkg/sftp/request-server.go
vendored
Normal file
|
@ -0,0 +1,231 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var maxTxPacket uint32 = 1 << 15
|
||||
|
||||
type handleHandler func(string) string
|
||||
|
||||
// Handlers contains the 4 SFTP server request handlers.
|
||||
type Handlers struct {
|
||||
FileGet FileReader
|
||||
FilePut FileWriter
|
||||
FileCmd FileCmder
|
||||
FileInfo FileInfoer
|
||||
}
|
||||
|
||||
// RequestServer abstracts the sftp protocol with an http request-like protocol
|
||||
type RequestServer struct {
|
||||
*serverConn
|
||||
Handlers Handlers
|
||||
pktMgr packetManager
|
||||
openRequests map[string]Request
|
||||
openRequestLock sync.RWMutex
|
||||
handleCount int
|
||||
}
|
||||
|
||||
// NewRequestServer creates/allocates/returns new RequestServer.
|
||||
// Normally there will be one server per user-session.
|
||||
func NewRequestServer(rwc io.ReadWriteCloser, h Handlers) *RequestServer {
|
||||
svrConn := &serverConn{
|
||||
conn: conn{
|
||||
Reader: rwc,
|
||||
WriteCloser: rwc,
|
||||
},
|
||||
}
|
||||
return &RequestServer{
|
||||
serverConn: svrConn,
|
||||
Handlers: h,
|
||||
pktMgr: newPktMgr(svrConn),
|
||||
openRequests: make(map[string]Request),
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *RequestServer) nextRequest(r Request) string {
|
||||
rs.openRequestLock.Lock()
|
||||
defer rs.openRequestLock.Unlock()
|
||||
rs.handleCount++
|
||||
handle := strconv.Itoa(rs.handleCount)
|
||||
rs.openRequests[handle] = r
|
||||
return handle
|
||||
}
|
||||
|
||||
func (rs *RequestServer) getRequest(handle string) (Request, bool) {
|
||||
rs.openRequestLock.RLock()
|
||||
defer rs.openRequestLock.RUnlock()
|
||||
r, ok := rs.openRequests[handle]
|
||||
return r, ok
|
||||
}
|
||||
|
||||
func (rs *RequestServer) closeRequest(handle string) {
|
||||
rs.openRequestLock.Lock()
|
||||
defer rs.openRequestLock.Unlock()
|
||||
if r, ok := rs.openRequests[handle]; ok {
|
||||
r.close()
|
||||
delete(rs.openRequests, handle)
|
||||
}
|
||||
}
|
||||
|
||||
// Close the read/write/closer to trigger exiting the main server loop
|
||||
func (rs *RequestServer) Close() error { return rs.conn.Close() }
|
||||
|
||||
// Serve requests for user session
|
||||
func (rs *RequestServer) Serve() error {
|
||||
var wg sync.WaitGroup
|
||||
runWorker := func(ch requestChan) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := rs.packetWorker(ch); err != nil {
|
||||
rs.conn.Close() // shuts down recvPacket
|
||||
}
|
||||
}()
|
||||
}
|
||||
pktChan := rs.pktMgr.workerChan(runWorker)
|
||||
|
||||
var err error
|
||||
var pkt requestPacket
|
||||
var pktType uint8
|
||||
var pktBytes []byte
|
||||
for {
|
||||
pktType, pktBytes, err = rs.recvPacket()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
|
||||
if err != nil {
|
||||
debug("makePacket err: %v", err)
|
||||
rs.conn.Close() // shuts down recvPacket
|
||||
break
|
||||
}
|
||||
|
||||
pktChan <- pkt
|
||||
}
|
||||
|
||||
close(pktChan) // shuts down sftpServerWorkers
|
||||
wg.Wait() // wait for all workers to exit
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (rs *RequestServer) packetWorker(pktChan chan requestPacket) error {
|
||||
for pkt := range pktChan {
|
||||
var rpkt responsePacket
|
||||
switch pkt := pkt.(type) {
|
||||
case *sshFxInitPacket:
|
||||
rpkt = sshFxVersionPacket{sftpProtocolVersion, nil}
|
||||
case *sshFxpClosePacket:
|
||||
handle := pkt.getHandle()
|
||||
rs.closeRequest(handle)
|
||||
rpkt = statusFromError(pkt, nil)
|
||||
case *sshFxpRealpathPacket:
|
||||
rpkt = cleanPath(pkt)
|
||||
case isOpener:
|
||||
handle := rs.nextRequest(requestFromPacket(pkt))
|
||||
rpkt = sshFxpHandlePacket{pkt.id(), handle}
|
||||
case *sshFxpFstatPacket:
|
||||
handle := pkt.getHandle()
|
||||
request, ok := rs.getRequest(handle)
|
||||
if !ok {
|
||||
rpkt = statusFromError(pkt, syscall.EBADF)
|
||||
} else {
|
||||
request = requestFromPacket(
|
||||
&sshFxpStatPacket{ID: pkt.id(), Path: request.Filepath})
|
||||
rpkt = rs.handle(request, pkt)
|
||||
}
|
||||
case *sshFxpFsetstatPacket:
|
||||
handle := pkt.getHandle()
|
||||
request, ok := rs.getRequest(handle)
|
||||
if !ok {
|
||||
rpkt = statusFromError(pkt, syscall.EBADF)
|
||||
} else {
|
||||
request = requestFromPacket(
|
||||
&sshFxpSetstatPacket{ID: pkt.id(), Path: request.Filepath,
|
||||
Flags: pkt.Flags, Attrs: pkt.Attrs,
|
||||
})
|
||||
rpkt = rs.handle(request, pkt)
|
||||
}
|
||||
case hasHandle:
|
||||
handle := pkt.getHandle()
|
||||
request, ok := rs.getRequest(handle)
|
||||
request.update(pkt)
|
||||
if !ok {
|
||||
rpkt = statusFromError(pkt, syscall.EBADF)
|
||||
} else {
|
||||
rpkt = rs.handle(request, pkt)
|
||||
}
|
||||
case hasPath:
|
||||
request := requestFromPacket(pkt)
|
||||
rpkt = rs.handle(request, pkt)
|
||||
default:
|
||||
return errors.Errorf("unexpected packet type %T", pkt)
|
||||
}
|
||||
|
||||
err := rs.sendPacket(rpkt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanPath(pkt *sshFxpRealpathPacket) responsePacket {
|
||||
path := pkt.getPath()
|
||||
if !filepath.IsAbs(path) {
|
||||
path = "/" + path
|
||||
} // all paths are absolute
|
||||
|
||||
cleaned_path := filepath.Clean(path)
|
||||
return &sshFxpNamePacket{
|
||||
ID: pkt.id(),
|
||||
NameAttrs: []sshFxpNameAttr{{
|
||||
Name: cleaned_path,
|
||||
LongName: cleaned_path,
|
||||
Attrs: emptyFileStat,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *RequestServer) handle(request Request, pkt requestPacket) responsePacket {
|
||||
// fmt.Println("Request Method: ", request.Method)
|
||||
rpkt, err := request.handle(rs.Handlers)
|
||||
if err != nil {
|
||||
err = errorAdapter(err)
|
||||
rpkt = statusFromError(pkt, err)
|
||||
}
|
||||
return rpkt
|
||||
}
|
||||
|
||||
// Wrap underlying connection methods to use packetManager
|
||||
func (rs *RequestServer) sendPacket(m encoding.BinaryMarshaler) error {
|
||||
if pkt, ok := m.(responsePacket); ok {
|
||||
rs.pktMgr.readyPacket(pkt)
|
||||
} else {
|
||||
return errors.Errorf("unexpected packet type %T", m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *RequestServer) sendError(p ider, err error) error {
|
||||
return rs.sendPacket(statusFromError(p, err))
|
||||
}
|
||||
|
||||
// os.ErrNotExist should convert to ssh_FX_NO_SUCH_FILE, but is not recognized
|
||||
// by statusFromError. So we convert to syscall.ENOENT, which it does recognize.
|
||||
func errorAdapter(err error) error {
|
||||
if err == os.ErrNotExist {
|
||||
return syscall.ENOENT
|
||||
}
|
||||
return err
|
||||
}
|
329
vendor/src/github.com/pkg/sftp/request-server_test.go
vendored
Normal file
|
@ -0,0 +1,329 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var _ = fmt.Print
|
||||
|
||||
type csPair struct {
|
||||
cli *Client
|
||||
svr *RequestServer
|
||||
}
|
||||
|
||||
// these must be closed in order, else client.Close will hang
|
||||
func (cs csPair) Close() {
|
||||
cs.svr.Close()
|
||||
cs.cli.Close()
|
||||
os.Remove(sock)
|
||||
}
|
||||
|
||||
func (cs csPair) testHandler() *root {
|
||||
return cs.svr.Handlers.FileGet.(*root)
|
||||
}
|
||||
|
||||
const sock = "/tmp/rstest.sock"
|
||||
|
||||
func clientRequestServerPair(t *testing.T) *csPair {
|
||||
ready := make(chan bool)
|
||||
os.Remove(sock) // either this or signal handling
|
||||
var server *RequestServer
|
||||
go func() {
|
||||
l, err := net.Listen("unix", sock)
|
||||
if err != nil {
|
||||
// neither assert nor t.Fatal reliably exit before Accept errors
|
||||
panic(err)
|
||||
}
|
||||
ready <- true
|
||||
fd, err := l.Accept()
|
||||
assert.Nil(t, err)
|
||||
handlers := InMemHandler()
|
||||
server = NewRequestServer(fd, handlers)
|
||||
server.Serve()
|
||||
}()
|
||||
<-ready
|
||||
defer os.Remove(sock)
|
||||
c, err := net.Dial("unix", sock)
|
||||
assert.Nil(t, err)
|
||||
client, err := NewClientPipe(c, c)
|
||||
if err != nil {
|
||||
t.Fatalf("%+v\n", err)
|
||||
}
|
||||
return &csPair{client, server}
|
||||
}
|
||||
|
||||
// after adding logging, maybe check log to make sure packet handling
|
||||
// was split over more than one worker
|
||||
func TestRequestSplitWrite(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
w, err := p.cli.Create("/foo")
|
||||
assert.Nil(t, err)
|
||||
p.cli.maxPacket = 3 // force it to send in small chunks
|
||||
contents := "one two three four five six seven eight nine ten"
|
||||
w.Write([]byte(contents))
|
||||
w.Close()
|
||||
r := p.testHandler()
|
||||
f, _ := r.fetch("/foo")
|
||||
assert.Equal(t, contents, string(f.content))
|
||||
}
|
||||
|
||||
func TestRequestCache(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
foo := NewRequest("", "foo")
|
||||
bar := NewRequest("", "bar")
|
||||
fh := p.svr.nextRequest(foo)
|
||||
bh := p.svr.nextRequest(bar)
|
||||
assert.Len(t, p.svr.openRequests, 2)
|
||||
_foo, ok := p.svr.getRequest(fh)
|
||||
assert.Equal(t, foo, _foo)
|
||||
assert.True(t, ok)
|
||||
_, ok = p.svr.getRequest("zed")
|
||||
assert.False(t, ok)
|
||||
p.svr.closeRequest(fh)
|
||||
p.svr.closeRequest(bh)
|
||||
assert.Len(t, p.svr.openRequests, 0)
|
||||
}
|
||||
|
||||
func TestRequestCacheState(t *testing.T) {
|
||||
// test operation that uses open/close
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, p.svr.openRequests, 0)
|
||||
// test operation that doesn't open/close
|
||||
err = p.cli.Remove("/foo")
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, p.svr.openRequests, 0)
|
||||
}
|
||||
|
||||
func putTestFile(cli *Client, path, content string) (int, error) {
|
||||
w, err := cli.Create(path)
|
||||
if err == nil {
|
||||
defer w.Close()
|
||||
return w.Write([]byte(content))
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
func TestRequestWrite(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
n, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 5, n)
|
||||
r := p.testHandler()
|
||||
f, err := r.fetch("/foo")
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, f.isdir)
|
||||
assert.Equal(t, f.content, []byte("hello"))
|
||||
}
|
||||
|
||||
// needs fail check
|
||||
func TestRequestFilename(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
r := p.testHandler()
|
||||
f, err := r.fetch("/foo")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, f.Name(), "foo")
|
||||
}
|
||||
|
||||
func TestRequestRead(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
rf, err := p.cli.Open("/foo")
|
||||
assert.Nil(t, err)
|
||||
defer rf.Close()
|
||||
contents := make([]byte, 5)
|
||||
n, err := rf.Read(contents)
|
||||
if err != nil && err != io.EOF {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
assert.Equal(t, 5, n)
|
||||
assert.Equal(t, "hello", string(contents[0:5]))
|
||||
}
|
||||
|
||||
func TestRequestReadFail(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
rf, err := p.cli.Open("/foo")
|
||||
assert.Nil(t, err)
|
||||
contents := make([]byte, 5)
|
||||
n, err := rf.Read(contents)
|
||||
assert.Equal(t, n, 0)
|
||||
assert.Exactly(t, os.ErrNotExist, err)
|
||||
}
|
||||
|
||||
func TestRequestOpen(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
fh, err := p.cli.Open("foo")
|
||||
assert.Nil(t, err)
|
||||
err = fh.Close()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestRequestMkdir(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
err := p.cli.Mkdir("/foo")
|
||||
assert.Nil(t, err)
|
||||
r := p.testHandler()
|
||||
f, err := r.fetch("/foo")
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, f.isdir)
|
||||
}
|
||||
|
||||
func TestRequestRemove(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
r := p.testHandler()
|
||||
_, err = r.fetch("/foo")
|
||||
assert.Nil(t, err)
|
||||
err = p.cli.Remove("/foo")
|
||||
assert.Nil(t, err)
|
||||
_, err = r.fetch("/foo")
|
||||
assert.Equal(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestRequestRename(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
r := p.testHandler()
|
||||
_, err = r.fetch("/foo")
|
||||
assert.Nil(t, err)
|
||||
err = p.cli.Rename("/foo", "/bar")
|
||||
assert.Nil(t, err)
|
||||
_, err = r.fetch("/bar")
|
||||
assert.Nil(t, err)
|
||||
_, err = r.fetch("/foo")
|
||||
assert.Equal(t, err, os.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestRequestRenameFail(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
_, err = putTestFile(p.cli, "/bar", "goodbye")
|
||||
assert.Nil(t, err)
|
||||
err = p.cli.Rename("/foo", "/bar")
|
||||
assert.IsType(t, &StatusError{}, err)
|
||||
}
|
||||
|
||||
func TestRequestStat(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
fi, err := p.cli.Stat("/foo")
|
||||
assert.Equal(t, fi.Name(), "foo")
|
||||
assert.Equal(t, fi.Size(), int64(5))
|
||||
assert.Equal(t, fi.Mode(), os.FileMode(0644))
|
||||
assert.NoError(t, testOsSys(fi.Sys()))
|
||||
}
|
||||
|
||||
// NOTE: Setstat is a noop in the request server tests, but we want to test
|
||||
// that it does nothing without crashing.
|
||||
func TestRequestSetstat(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
mode := os.FileMode(0644)
|
||||
err = p.cli.Chmod("/foo", mode)
|
||||
assert.Nil(t, err)
|
||||
fi, err := p.cli.Stat("/foo")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, fi.Name(), "foo")
|
||||
assert.Equal(t, fi.Size(), int64(5))
|
||||
assert.Equal(t, fi.Mode(), os.FileMode(0644))
|
||||
assert.NoError(t, testOsSys(fi.Sys()))
|
||||
}
|
||||
|
||||
func TestRequestFstat(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
fp, err := p.cli.Open("/foo")
|
||||
assert.Nil(t, err)
|
||||
fi, err := fp.Stat()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, fi.Name(), "foo")
|
||||
assert.Equal(t, fi.Size(), int64(5))
|
||||
assert.Equal(t, fi.Mode(), os.FileMode(0644))
|
||||
assert.NoError(t, testOsSys(fi.Sys()))
|
||||
}
|
||||
|
||||
func TestRequestStatFail(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
fi, err := p.cli.Stat("/foo")
|
||||
assert.Nil(t, fi)
|
||||
assert.True(t, os.IsNotExist(err))
|
||||
}
|
||||
|
||||
func TestRequestSymlink(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
err = p.cli.Symlink("/foo", "/bar")
|
||||
assert.Nil(t, err)
|
||||
r := p.testHandler()
|
||||
fi, err := r.fetch("/bar")
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, fi.Mode()&os.ModeSymlink == os.ModeSymlink)
|
||||
}
|
||||
|
||||
func TestRequestSymlinkFail(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
err := p.cli.Symlink("/foo", "/bar")
|
||||
assert.True(t, os.IsNotExist(err))
|
||||
}
|
||||
|
||||
func TestRequestReadlink(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
_, err := putTestFile(p.cli, "/foo", "hello")
|
||||
assert.Nil(t, err)
|
||||
err = p.cli.Symlink("/foo", "/bar")
|
||||
assert.Nil(t, err)
|
||||
rl, err := p.cli.ReadLink("/bar")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "foo", rl)
|
||||
}
|
||||
|
||||
func TestRequestReaddir(t *testing.T) {
|
||||
p := clientRequestServerPair(t)
|
||||
defer p.Close()
|
||||
for i := 0; i < 100; i++ {
|
||||
fname := fmt.Sprintf("/foo_%02d", i)
|
||||
_, err := putTestFile(p.cli, fname, fname)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
di, err := p.cli.ReadDir("/")
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, di, 100)
|
||||
names := []string{di[18].Name(), di[81].Name()}
|
||||
assert.Equal(t, []string{"foo_18", "foo_81"}, names)
|
||||
}
|
23
vendor/src/github.com/pkg/sftp/request-unix.go
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// +build !windows
|
||||
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func fakeFileInfoSys() interface{} {
|
||||
return &syscall.Stat_t{Uid: 65534, Gid: 65534}
|
||||
}
|
||||
|
||||
func testOsSys(sys interface{}) error {
|
||||
fstat := sys.(*FileStat)
|
||||
if fstat.UID != uint32(65534) {
|
||||
return errors.New("Uid failed to match.")
|
||||
}
|
||||
if fstat.GID != uint32(65534) {
|
||||
return errors.New("Gid failed to match:")
|
||||
}
|
||||
return nil
|
||||
}
|
334
vendor/src/github.com/pkg/sftp/request.go
vendored
Normal file
|
@ -0,0 +1,334 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Request contains the data and state for the incoming service request.
|
||||
type Request struct {
|
||||
// Get, Put, Setstat, Stat, Rename, Remove
|
||||
// Rmdir, Mkdir, List, Readlink, Symlink
|
||||
Method string
|
||||
Filepath string
|
||||
Flags uint32
|
||||
Attrs []byte // convert to sub-struct
|
||||
Target string // for renames and sym-links
|
||||
// packet data
|
||||
pkt_id uint32
|
||||
packets chan packet_data
|
||||
// reader/writer/readdir from handlers
|
||||
stateLock *sync.RWMutex
|
||||
state *state
|
||||
}
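For metadata operations the request server hands one of these Requests to a FileCmder; a minimal sketch of such a handler, dispatching on Method and Target as described above (the exampleCmder type and its root field are assumptions for illustration, not part of this commit):

```go
// Illustrative only; assumes it lives beside request.go so the imports
// ("os", "path/filepath") and the Request type are already in scope.
type exampleCmder struct {
	root string // assumed base directory the handler serves from
}

func (h *exampleCmder) Filecmd(r Request) error {
	switch r.Method {
	case "Rename":
		return os.Rename(filepath.Join(h.root, r.Filepath), filepath.Join(h.root, r.Target))
	case "Remove", "Rmdir":
		return os.Remove(filepath.Join(h.root, r.Filepath))
	case "Mkdir":
		return os.Mkdir(filepath.Join(h.root, r.Filepath), 0755)
	default:
		return nil // Setstat and Symlink left as no-ops in this sketch
	}
}
```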
|
||||
|
||||
type state struct {
|
||||
writerAt io.WriterAt
|
||||
readerAt io.ReaderAt
|
||||
endofdir bool // in case handler doesn't use EOF on file list
|
||||
readdirToken string
|
||||
}
|
||||
|
||||
type packet_data struct {
|
||||
id uint32
|
||||
data []byte
|
||||
length uint32
|
||||
offset int64
|
||||
}
|
||||
|
||||
// requestFromPacket returns a new Request initialized based on packet data
|
||||
func requestFromPacket(pkt hasPath) Request {
|
||||
method := requestMethod(pkt)
|
||||
request := NewRequest(method, pkt.getPath())
|
||||
request.pkt_id = pkt.id()
|
||||
switch p := pkt.(type) {
|
||||
case *sshFxpSetstatPacket:
|
||||
request.Flags = p.Flags
|
||||
request.Attrs = p.Attrs.([]byte)
|
||||
case *sshFxpRenamePacket:
|
||||
request.Target = filepath.Clean(p.Newpath)
|
||||
case *sshFxpSymlinkPacket:
|
||||
request.Target = filepath.Clean(p.Linkpath)
|
||||
}
|
||||
return request
|
||||
}
|
||||
|
||||
// NewRequest creates a new Request object.
|
||||
func NewRequest(method, path string) Request {
|
||||
request := Request{Method: method, Filepath: filepath.Clean(path)}
|
||||
request.packets = make(chan packet_data, sftpServerWorkerCount)
|
||||
request.state = &state{}
|
||||
request.stateLock = &sync.RWMutex{}
|
||||
return request
|
||||
}
|
||||
|
||||
// LsSave takes a token to keep track of file list batches. Openssh uses a
|
||||
// batch size of 100, so I suggest sticking close to that.
|
||||
func (r Request) LsSave(token string) {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
r.state.readdirToken = token
|
||||
}
|
||||
|
||||
// LsNext should return the token from the previous call to know which batch
|
||||
// to return next.
|
||||
func (r Request) LsNext() string {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
return r.state.readdirToken
|
||||
}
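LsSave and LsNext are the hooks a handler uses for paged directory listings. A minimal sketch of a FileInfoer that serves results in batches of 100 under that scheme — the exampleLister type, its listAll field, and the numeric token encoding are assumptions for illustration, not part of this commit:

```go
// Illustrative only; assumes it lives beside request.go with "strconv"
// added to the imports. listAll is an assumed data source, and the
// Stat/Readlink cases of the FileInfoer interface are omitted here.
type exampleLister struct {
	listAll func(path string) []os.FileInfo
}

// Fileinfo answers "List" requests in batches of 100: LsNext recovers the
// offset saved by the previous call, LsSave records where to resume.
func (h *exampleLister) Fileinfo(r Request) ([]os.FileInfo, error) {
	all := h.listAll(r.Filepath)

	offset := 0
	if tok := r.LsNext(); tok != "" {
		offset, _ = strconv.Atoi(tok)
	}
	if offset >= len(all) {
		return nil, nil // no entries: the server replies with io.EOF
	}

	end := offset + 100
	if end >= len(all) {
		end = len(all)
		r.LsSave("") // last batch: the server sends io.EOF on the next call
	} else {
		r.LsSave(strconv.Itoa(end)) // more batches remain
	}
	return all[offset:end], nil
}
```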
|
||||
|
||||
// manage file read/write state
|
||||
func (r Request) setFileState(s interface{}) {
|
||||
r.stateLock.Lock()
|
||||
defer r.stateLock.Unlock()
|
||||
switch s := s.(type) {
|
||||
case io.WriterAt:
|
||||
r.state.writerAt = s
|
||||
case io.ReaderAt:
|
||||
r.state.readerAt = s
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (r Request) getWriter() io.WriterAt {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
return r.state.writerAt
|
||||
}
|
||||
|
||||
func (r Request) getReader() io.ReaderAt {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
return r.state.readerAt
|
||||
}
|
||||
|
||||
// For backwards compatibility. The Handler didn't have batch handling at
|
||||
// first, and just always assumed 1 batch. This preserves that behavior.
|
||||
func (r Request) setEOD(eod bool) {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
r.state.endofdir = eod
|
||||
}
|
||||
|
||||
func (r Request) getEOD() bool {
|
||||
r.stateLock.RLock()
|
||||
defer r.stateLock.RUnlock()
|
||||
return r.state.endofdir
|
||||
}
|
||||
|
||||
// Close reader/writer if possible
|
||||
func (r Request) close() {
|
||||
rd := r.getReader()
|
||||
if c, ok := rd.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
wt := r.getWriter()
|
||||
if c, ok := wt.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// push packet_data into fifo
|
||||
func (r Request) pushPacket(pd packet_data) {
|
||||
r.packets <- pd
|
||||
}
|
||||
|
||||
// pop packet_data from fifo
|
||||
func (r *Request) popPacket() packet_data {
|
||||
return <-r.packets
|
||||
}
|
||||
|
||||
// called from worker to handle packet/request
|
||||
func (r Request) handle(handlers Handlers) (responsePacket, error) {
|
||||
var err error
|
||||
var rpkt responsePacket
|
||||
switch r.Method {
|
||||
case "Get":
|
||||
rpkt, err = fileget(handlers.FileGet, r)
|
||||
case "Put": // add "Append" to this to handle append only file writes
|
||||
rpkt, err = fileput(handlers.FilePut, r)
|
||||
case "Setstat", "Rename", "Rmdir", "Mkdir", "Symlink", "Remove":
|
||||
rpkt, err = filecmd(handlers.FileCmd, r)
|
||||
case "List", "Stat", "Readlink":
|
||||
rpkt, err = fileinfo(handlers.FileInfo, r)
|
||||
default:
|
||||
return rpkt, errors.Errorf("unexpected method: %s", r.Method)
|
||||
}
|
||||
return rpkt, err
|
||||
}
|
||||
|
||||
// wrap FileReader handler
|
||||
func fileget(h FileReader, r Request) (responsePacket, error) {
|
||||
var err error
|
||||
reader := r.getReader()
|
||||
if reader == nil {
|
||||
reader, err = h.Fileread(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setFileState(reader)
|
||||
}
|
||||
|
||||
pd := r.popPacket()
|
||||
data := make([]byte, clamp(pd.length, maxTxPacket))
|
||||
n, err := reader.ReadAt(data, pd.offset)
|
||||
if err != nil && (err != io.EOF || n == 0) {
|
||||
return nil, err
|
||||
}
|
||||
return &sshFxpDataPacket{
|
||||
ID: pd.id,
|
||||
Length: uint32(n),
|
||||
Data: data[:n],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// wrap FileWriter handler
|
||||
func fileput(h FileWriter, r Request) (responsePacket, error) {
|
||||
var err error
|
||||
writer := r.getWriter()
|
||||
if writer == nil {
|
||||
writer, err = h.Filewrite(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.setFileState(writer)
|
||||
}
|
||||
|
||||
pd := r.popPacket()
|
||||
_, err = writer.WriteAt(pd.data, pd.offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &sshFxpStatusPacket{
|
||||
ID: pd.id,
|
||||
StatusError: StatusError{
|
||||
Code: ssh_FX_OK,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
// wrap FileCmder handler
|
||||
func filecmd(h FileCmder, r Request) (responsePacket, error) {
|
||||
err := h.Filecmd(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &sshFxpStatusPacket{
|
||||
ID: r.pkt_id,
|
||||
StatusError: StatusError{
|
||||
Code: ssh_FX_OK,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
// wrap FileInfoer handler
|
||||
func fileinfo(h FileInfoer, r Request) (responsePacket, error) {
|
||||
if r.getEOD() {
|
||||
return nil, io.EOF
|
||||
}
|
||||
finfo, err := h.Fileinfo(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case "List":
|
||||
pd := r.popPacket()
|
||||
dirname := path.Base(r.Filepath)
|
||||
ret := &sshFxpNamePacket{ID: pd.id}
|
||||
for _, fi := range finfo {
|
||||
ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{
|
||||
Name: fi.Name(),
|
||||
LongName: runLs(dirname, fi),
|
||||
Attrs: []interface{}{fi},
|
||||
})
|
||||
}
|
||||
// No entries means we should return EOF as the Handler didn't.
|
||||
if len(finfo) == 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
// If files are returned but no token is set, return EOF next call.
|
||||
if r.LsNext() == "" {
|
||||
r.setEOD(true)
|
||||
}
|
||||
return ret, nil
|
||||
case "Stat":
|
||||
if len(finfo) == 0 {
|
||||
err = &os.PathError{Op: "stat", Path: r.Filepath,
|
||||
Err: syscall.ENOENT}
|
||||
return nil, err
|
||||
}
|
||||
return &sshFxpStatResponse{
|
||||
ID: r.pkt_id,
|
||||
info: finfo[0],
|
||||
}, nil
|
||||
case "Readlink":
|
||||
if len(finfo) == 0 {
|
||||
err = &os.PathError{Op: "readlink", Path: r.Filepath,
|
||||
Err: syscall.ENOENT}
|
||||
return nil, err
|
||||
}
|
||||
filename := finfo[0].Name()
|
||||
return &sshFxpNamePacket{
|
||||
ID: r.pkt_id,
|
||||
NameAttrs: []sshFxpNameAttr{{
|
||||
Name: filename,
|
||||
LongName: filename,
|
||||
Attrs: emptyFileStat,
|
||||
}},
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// file data for additional read/write packets
|
||||
func (r *Request) update(p hasHandle) error {
|
||||
pd := packet_data{id: p.id()}
|
||||
switch p := p.(type) {
|
||||
case *sshFxpReadPacket:
|
||||
r.Method = "Get"
|
||||
pd.length = p.Len
|
||||
pd.offset = int64(p.Offset)
|
||||
case *sshFxpWritePacket:
|
||||
r.Method = "Put"
|
||||
pd.data = p.Data
|
||||
pd.length = p.Length
|
||||
pd.offset = int64(p.Offset)
|
||||
case *sshFxpReaddirPacket:
|
||||
r.Method = "List"
|
||||
default:
|
||||
return errors.Errorf("unexpected packet type %T", p)
|
||||
}
|
||||
r.pushPacket(pd)
|
||||
return nil
|
||||
}
|
||||
|
||||
// requestMethod maps a packet type to the request method name
|
||||
func requestMethod(p hasPath) (method string) {
|
||||
switch p.(type) {
|
||||
case *sshFxpOpenPacket, *sshFxpOpendirPacket:
|
||||
method = "Open"
|
||||
case *sshFxpSetstatPacket:
|
||||
method = "Setstat"
|
||||
case *sshFxpRenamePacket:
|
||||
method = "Rename"
|
||||
case *sshFxpSymlinkPacket:
|
||||
method = "Symlink"
|
||||
case *sshFxpRemovePacket:
|
||||
method = "Remove"
|
||||
case *sshFxpStatPacket, *sshFxpLstatPacket:
|
||||
method = "Stat"
|
||||
case *sshFxpRmdirPacket:
|
||||
method = "Rmdir"
|
||||
case *sshFxpReadlinkPacket:
|
||||
method = "Readlink"
|
||||
case *sshFxpMkdirPacket:
|
||||
method = "Mkdir"
|
||||
}
|
||||
return method
|
||||
}
|
182
vendor/src/github.com/pkg/sftp/request_test.go
vendored
Normal file
|
@ -0,0 +1,182 @@
|
|||
package sftp
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testHandler struct {
|
||||
filecontents []byte // dummy contents
|
||||
output io.WriterAt // dummy file out
|
||||
err error // dummy error, should be file related
|
||||
}
|
||||
|
||||
func (t *testHandler) Fileread(r Request) (io.ReaderAt, error) {
|
||||
if t.err != nil {
|
||||
return nil, t.err
|
||||
}
|
||||
return bytes.NewReader(t.filecontents), nil
|
||||
}
|
||||
|
||||
func (t *testHandler) Filewrite(r Request) (io.WriterAt, error) {
|
||||
if t.err != nil {
|
||||
return nil, t.err
|
||||
}
|
||||
return io.WriterAt(t.output), nil
|
||||
}
|
||||
|
||||
func (t *testHandler) Filecmd(r Request) error {
|
||||
if t.err != nil {
|
||||
return t.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *testHandler) Fileinfo(r Request) ([]os.FileInfo, error) {
|
||||
if t.err != nil {
|
||||
return nil, t.err
|
||||
}
|
||||
f, err := os.Open(r.Filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []os.FileInfo{fi}, nil
|
||||
}
|
||||
|
||||
// make sure len(fakefile) == len(filecontents)
|
||||
type fakefile [10]byte
|
||||
|
||||
var filecontents = []byte("file-data.")
|
||||
|
||||
func testRequest(method string) Request {
|
||||
request := Request{
|
||||
Filepath: "./request_test.go",
|
||||
Method: method,
|
||||
Attrs: []byte("foo"),
|
||||
Target: "foo",
|
||||
packets: make(chan packet_data, sftpServerWorkerCount),
|
||||
state: &state{},
|
||||
stateLock: &sync.RWMutex{},
|
||||
}
|
||||
for _, p := range []packet_data{
|
||||
packet_data{id: 1, data: filecontents[:5], length: 5},
|
||||
packet_data{id: 2, data: filecontents[5:], length: 5, offset: 5}} {
|
||||
request.packets <- p
|
||||
}
|
||||
return request
|
||||
}
|
||||
|
||||
func (ff *fakefile) WriteAt(p []byte, off int64) (int, error) {
|
||||
n := copy(ff[off:], p)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (ff fakefile) string() string {
|
||||
b := make([]byte, len(ff))
|
||||
copy(b, ff[:])
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func newTestHandlers() Handlers {
|
||||
handler := &testHandler{
|
||||
filecontents: filecontents,
|
||||
output: &fakefile{},
|
||||
err: nil,
|
||||
}
|
||||
return Handlers{
|
||||
FileGet: handler,
|
||||
FilePut: handler,
|
||||
FileCmd: handler,
|
||||
FileInfo: handler,
|
||||
}
|
||||
}
|
||||
|
||||
func (h Handlers) getOutString() string {
|
||||
handler := h.FilePut.(*testHandler)
|
||||
return handler.output.(*fakefile).string()
|
||||
}
|
||||
|
||||
var errTest = errors.New("test error")
|
||||
|
||||
func (h *Handlers) returnError() {
|
||||
handler := h.FilePut.(*testHandler)
|
||||
handler.err = errTest
|
||||
}
|
||||
|
||||
func statusOk(t *testing.T, p interface{}) {
|
||||
if pkt, ok := p.(*sshFxpStatusPacket); ok {
|
||||
assert.Equal(t, pkt.StatusError.Code, uint32(ssh_FX_OK))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestGet(t *testing.T) {
|
||||
handlers := newTestHandlers()
|
||||
request := testRequest("Get")
|
||||
// req.length is 5, so we test reads in 5 byte chunks
|
||||
for i, txt := range []string{"file-", "data."} {
|
||||
pkt, err := request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
dpkt := pkt.(*sshFxpDataPacket)
|
||||
assert.Equal(t, dpkt.id(), uint32(i+1))
|
||||
assert.Equal(t, string(dpkt.Data), txt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestPut(t *testing.T) {
|
||||
handlers := newTestHandlers()
|
||||
request := testRequest("Put")
|
||||
pkt, err := request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
statusOk(t, pkt)
|
||||
pkt, err = request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
statusOk(t, pkt)
|
||||
assert.Equal(t, "file-data.", handlers.getOutString())
|
||||
}
|
||||
|
||||
func TestRequestCmdr(t *testing.T) {
|
||||
handlers := newTestHandlers()
|
||||
request := testRequest("Mkdir")
|
||||
pkt, err := request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
statusOk(t, pkt)
|
||||
|
||||
handlers.returnError()
|
||||
pkt, err = request.handle(handlers)
|
||||
assert.Nil(t, pkt)
|
||||
assert.Equal(t, err, errTest)
|
||||
}
|
||||
|
||||
func TestRequestInfoList(t *testing.T) { testInfoMethod(t, "List") }
|
||||
func TestRequestInfoReadlink(t *testing.T) { testInfoMethod(t, "Readlink") }
|
||||
func TestRequestInfoStat(t *testing.T) {
|
||||
handlers := newTestHandlers()
|
||||
request := testRequest("Stat")
|
||||
pkt, err := request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
spkt, ok := pkt.(*sshFxpStatResponse)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, spkt.info.Name(), "request_test.go")
|
||||
}
|
||||
|
||||
func testInfoMethod(t *testing.T, method string) {
|
||||
handlers := newTestHandlers()
|
||||
request := testRequest(method)
|
||||
pkt, err := request.handle(handlers)
|
||||
assert.Nil(t, err)
|
||||
npkt, ok := pkt.(*sshFxpNamePacket)
|
||||
assert.True(t, ok)
|
||||
assert.IsType(t, sshFxpNameAttr{}, npkt.NameAttrs[0])
|
||||
assert.Equal(t, npkt.NameAttrs[0].Name, "request_test.go")
|
||||
}
|
11
vendor/src/github.com/pkg/sftp/request_windows.go
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
package sftp
|
||||
|
||||
import "syscall"
|
||||
|
||||
func fakeFileInfoSys() interface{} {
|
||||
return syscall.Win32FileAttributeData{}
|
||||
}
|
||||
|
||||
func testOsSys(sys interface{}) error {
|
||||
return nil
|
||||
}
|
122
vendor/src/github.com/pkg/sftp/server.go
vendored
|
@ -26,10 +26,10 @@ const (
|
|||
// This implementation currently supports most of sftp server protocol version 3,
|
||||
// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
|
||||
type Server struct {
|
||||
serverConn
|
||||
*serverConn
|
||||
debugStream io.Writer
|
||||
readOnly bool
|
||||
pktChan chan rxPacket
|
||||
pktMgr packetManager
|
||||
openFiles map[string]*os.File
|
||||
openFilesLock sync.RWMutex
|
||||
handleCount int
|
||||
|
@ -75,15 +75,16 @@ type serverRespondablePacket interface {
|
|||
//
|
||||
// A subsequent call to Serve() is required to begin serving files over SFTP.
|
||||
func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) {
|
||||
s := &Server{
|
||||
serverConn: serverConn{
|
||||
svrConn := &serverConn{
|
||||
conn: conn{
|
||||
Reader: rwc,
|
||||
WriteCloser: rwc,
|
||||
},
|
||||
},
|
||||
}
|
||||
s := &Server{
|
||||
serverConn: svrConn,
|
||||
debugStream: ioutil.Discard,
|
||||
pktChan: make(chan rxPacket, sftpServerWorkerCount),
|
||||
pktMgr: newPktMgr(svrConn),
|
||||
openFiles: make(map[string]*os.File),
|
||||
maxTxPacket: 1 << 15,
|
||||
}
|
||||
|
@ -122,72 +123,14 @@ type rxPacket struct {
|
|||
}
|
||||
|
||||
// Up to N parallel servers
|
||||
func (svr *Server) sftpServerWorker() error {
|
||||
for p := range svr.pktChan {
|
||||
var pkt interface {
|
||||
encoding.BinaryUnmarshaler
|
||||
id() uint32
|
||||
}
|
||||
var readonly = true
|
||||
switch p.pktType {
|
||||
case ssh_FXP_INIT:
|
||||
pkt = &sshFxInitPacket{}
|
||||
case ssh_FXP_LSTAT:
|
||||
pkt = &sshFxpLstatPacket{}
|
||||
case ssh_FXP_OPEN:
|
||||
pkt = &sshFxpOpenPacket{}
|
||||
// readonly handled specially below
|
||||
case ssh_FXP_CLOSE:
|
||||
pkt = &sshFxpClosePacket{}
|
||||
case ssh_FXP_READ:
|
||||
pkt = &sshFxpReadPacket{}
|
||||
case ssh_FXP_WRITE:
|
||||
pkt = &sshFxpWritePacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_FSTAT:
|
||||
pkt = &sshFxpFstatPacket{}
|
||||
case ssh_FXP_SETSTAT:
|
||||
pkt = &sshFxpSetstatPacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_FSETSTAT:
|
||||
pkt = &sshFxpFsetstatPacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_OPENDIR:
|
||||
pkt = &sshFxpOpendirPacket{}
|
||||
case ssh_FXP_READDIR:
|
||||
pkt = &sshFxpReaddirPacket{}
|
||||
case ssh_FXP_REMOVE:
|
||||
pkt = &sshFxpRemovePacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_MKDIR:
|
||||
pkt = &sshFxpMkdirPacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_RMDIR:
|
||||
pkt = &sshFxpRmdirPacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_REALPATH:
|
||||
pkt = &sshFxpRealpathPacket{}
|
||||
case ssh_FXP_STAT:
|
||||
pkt = &sshFxpStatPacket{}
|
||||
case ssh_FXP_RENAME:
|
||||
pkt = &sshFxpRenamePacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_READLINK:
|
||||
pkt = &sshFxpReadlinkPacket{}
|
||||
case ssh_FXP_SYMLINK:
|
||||
pkt = &sshFxpSymlinkPacket{}
|
||||
readonly = false
|
||||
case ssh_FXP_EXTENDED:
|
||||
pkt = &sshFxpExtendedPacket{}
|
||||
default:
|
||||
return errors.Errorf("unhandled packet type: %s", p.pktType)
|
||||
}
|
||||
if err := pkt.UnmarshalBinary(p.pktBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error {
|
||||
for pkt := range pktChan {
|
||||
|
||||
// handle FXP_OPENDIR specially
|
||||
// readonly checks
|
||||
readonly := true
|
||||
switch pkt := pkt.(type) {
|
||||
case notReadOnly:
|
||||
readonly = false
|
||||
case *sshFxpOpenPacket:
|
||||
readonly = pkt.readonly()
|
||||
case *sshFxpExtendedPacket:
|
||||
|
@ -288,6 +231,7 @@ func handlePacket(s *Server, p interface{}) error {
|
|||
return s.sendError(p, err)
|
||||
}
|
||||
f = filepath.Clean(f)
|
||||
f = filepath.ToSlash(f) // make path more Unix like on windows servers
|
||||
return s.sendPacket(sshFxpNamePacket{
|
||||
ID: p.ID,
|
||||
NameAttrs: []sshFxpNameAttr{{
|
||||
|
@ -338,17 +282,19 @@ func handlePacket(s *Server, p interface{}) error {
|
|||
// is stopped.
|
||||
func (svr *Server) Serve() error {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(sftpServerWorkerCount)
|
||||
for i := 0; i < sftpServerWorkerCount; i++ {
|
||||
runWorker := func(ch requestChan) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := svr.sftpServerWorker(); err != nil {
|
||||
if err := svr.sftpServerWorker(ch); err != nil {
|
||||
svr.conn.Close() // shuts down recvPacket
|
||||
}
|
||||
}()
|
||||
}
|
||||
pktChan := svr.pktMgr.workerChan(runWorker)
|
||||
|
||||
var err error
|
||||
var pkt requestPacket
|
||||
var pktType uint8
|
||||
var pktBytes []byte
|
||||
for {
|
||||
|
@ -356,10 +302,18 @@ func (svr *Server) Serve() error {
|
|||
if err != nil {
|
||||
break
|
||||
}
|
||||
svr.pktChan <- rxPacket{fxp(pktType), pktBytes}
|
||||
|
||||
pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
|
||||
if err != nil {
|
||||
debug("makePacket err: %v", err)
|
||||
svr.conn.Close() // shuts down recvPacket
|
||||
break
|
||||
}
|
||||
|
||||
close(svr.pktChan) // shuts down sftpServerWorkers
|
||||
pktChan <- pkt
|
||||
}
|
||||
|
||||
close(pktChan) // shuts down sftpServerWorkers
|
||||
wg.Wait() // wait for all workers to exit
|
||||
|
||||
// close any still-open files
|
||||
|
@ -370,7 +324,21 @@ func (svr *Server) Serve() error {
|
|||
return err // error from recvPacket
|
||||
}
|
||||
|
||||
type id interface {
|
||||
// Wrap underlying connection methods to use packetManager
|
||||
func (svr *Server) sendPacket(m encoding.BinaryMarshaler) error {
|
||||
if pkt, ok := m.(responsePacket); ok {
|
||||
svr.pktMgr.readyPacket(pkt)
|
||||
} else {
|
||||
return errors.Errorf("unexpected packet type %T", m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svr *Server) sendError(p ider, err error) error {
|
||||
return svr.sendPacket(statusFromError(p, err))
|
||||
}
|
||||
|
||||
type ider interface {
|
||||
id() uint32
|
||||
}
|
||||
|
||||
|
@ -565,7 +533,7 @@ func translateErrno(errno syscall.Errno) uint32 {
|
|||
return ssh_FX_FAILURE
|
||||
}
|
||||
|
||||
func statusFromError(p id, err error) sshFxpStatusPacket {
|
||||
func statusFromError(p ider, err error) sshFxpStatusPacket {
|
||||
ret := sshFxpStatusPacket{
|
||||
ID: p.id(),
|
||||
StatusError: StatusError{
|
||||
|
|
|
@ -18,6 +18,7 @@ func main() {
|
|||
readOnly bool
|
||||
debugStderr bool
|
||||
debugLevel string
|
||||
options []sftp.ServerOption
|
||||
)
|
||||
|
||||
flag.BoolVar(&readOnly, "R", false, "read-only server")
|
||||
|
@ -29,6 +30,11 @@ func main() {
|
|||
if debugStderr {
|
||||
debugStream = os.Stderr
|
||||
}
|
||||
options = append(options, sftp.WithDebug(debugStream))
|
||||
|
||||
if readOnly {
|
||||
options = append(options, sftp.ReadOnly())
|
||||
}
|
||||
|
||||
svr, _ := sftp.NewServer(
|
||||
struct {
|
||||
|
@ -37,8 +43,7 @@ func main() {
|
|||
}{os.Stdin,
|
||||
os.Stdout,
|
||||
},
|
||||
sftp.WithDebug(debugStream),
|
||||
sftp.ReadOnly(),
|
||||
options...,
|
||||
)
|
||||
if err := svr.Serve(); err != nil {
|
||||
fmt.Fprintf(debugStream, "sftp server completed with error: %v", err)
|
||||
|
|
|
@ -16,7 +16,6 @@ func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
|
|||
Files: stat.Files,
|
||||
Ffree: stat.Ffree,
|
||||
Favail: stat.Ffree, // not sure how to calculate Favail
|
||||
Fsid: uint64(uint64(stat.Fsid.X__val[1])<<32 | uint64(stat.Fsid.X__val[0])), // endianness?
|
||||
Flag: uint64(stat.Flags), // assuming POSIX?
|
||||
Namemax: uint64(stat.Namelen),
|
||||
}, nil
|
||||
|
|
33
vendor/src/github.com/pkg/sftp/server_test.go
vendored
|
@ -2,6 +2,7 @@ package sftp
|
|||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
|
@ -46,8 +47,10 @@ func (p sshFxpTestBadExtendedPacket) MarshalBinary() ([]byte, error) {
|
|||
|
||||
// test that errors are sent back when we request an invalid extended packet operation
|
||||
func TestInvalidExtendedPacket(t *testing.T) {
|
||||
client, _ := clientServerPair(t)
|
||||
client, server := clientServerPair(t)
|
||||
defer client.Close()
|
||||
defer server.Close()
|
||||
|
||||
badPacket := sshFxpTestBadExtendedPacket{client.nextID(), "thisDoesn'tExist", "foobar"}
|
||||
_, _, err := client.clientConn.sendPacket(badPacket)
|
||||
if err == nil {
|
||||
|
@ -62,3 +65,31 @@ func TestInvalidExtendedPacket(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
// test that server handles concurrent requests correctly
|
||||
func TestConcurrentRequests(t *testing.T) {
|
||||
client, server := clientServerPair(t)
|
||||
defer client.Close()
|
||||
defer server.Close()
|
||||
|
||||
concurrency := 2
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrency)
|
||||
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for j := 0; j < 1024; j++ {
|
||||
f, err := client.Open("/etc/passwd")
|
||||
if err != nil {
|
||||
t.Errorf("failed to open file: %v", err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
t.Errorf("failed t close file: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
|
11
vendor/src/github.com/pkg/xattr/README.md
vendored
|
@ -18,8 +18,17 @@ Extended attribute support for Go (linux + darwin + freebsd).
|
|||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var list []string
|
||||
if list, err = xattr.List(path); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var data []byte
|
||||
data, err = xattr.Get(path, prefix+"test"); err != nil {
|
||||
if data, err = xattr.Get(path, prefix+"test"); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err = xattr.Remove(path, prefix+"test"); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
|
||||
const UserPrefix = "user."
|
||||
|
||||
func Test_setxattr(t *testing.T) {
|
||||
func Test(t *testing.T) {
|
||||
tmp, err := ioutil.TempFile("", "")
|
||||
|
||||
if err != nil {
|
||||
|
|
10
vendor/src/github.com/restic/chunker/chunker.go
vendored
|
@ -87,6 +87,12 @@ type Chunker struct {
|
|||
|
||||
// New returns a new Chunker based on polynomial p that reads from rd.
|
||||
func New(rd io.Reader, pol Pol) *Chunker {
|
||||
return NewWithBoundaries(rd, pol, MinSize, MaxSize)
|
||||
}
|
||||
|
||||
// NewWithBoundaries returns a new Chunker based on polynomial p that reads from
|
||||
// rd and custom min and max size boundaries
|
||||
func NewWithBoundaries(rd io.Reader, pol Pol, min, max uint) *Chunker {
|
||||
c := &Chunker{
|
||||
chunkerState: chunkerState{
|
||||
buf: make([]byte, chunkerBufSize),
|
||||
|
@ -94,8 +100,8 @@ func New(rd io.Reader, pol Pol) *Chunker {
|
|||
chunkerConfig: chunkerConfig{
|
||||
pol: pol,
|
||||
rd: rd,
|
||||
MinSize: MinSize,
|
||||
MaxSize: MaxSize,
|
||||
MinSize: min,
|
||||
MaxSize: max,
|
||||
},
|
||||
}
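NewWithBoundaries turns the previously fixed chunk-size limits into constructor arguments. A brief usage sketch with arbitrary example bounds; rd, pol, and the process callback are assumed to exist in the caller:

```go
// Illustrative only: cut rd into chunks between 512 KiB and 8 MiB.
chnker := chunker.NewWithBoundaries(rd, pol, 512*1024, 8*1024*1024)
buf := make([]byte, 8*1024*1024) // reusable buffer, sized to the max chunk
for {
	c, err := chnker.Next(buf)
	if err == io.EOF {
		break
	}
	if err != nil {
		return err
	}
	process(c.Data) // assumed consumer of the chunk bytes
}
```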
|
||||
|
||||
|
|
|
@ -114,10 +114,10 @@ func testWithData(t *testing.T, chnker *Chunker, testChunks []chunk, checkDigest
|
|||
return chunks
|
||||
}
|
||||
|
||||
func getRandom(seed, count int) []byte {
|
||||
func getRandom(seed int64, count int) []byte {
|
||||
buf := make([]byte, count)
|
||||
|
||||
rnd := rand.New(rand.NewSource(23))
|
||||
rnd := rand.New(rand.NewSource(seed))
|
||||
for i := 0; i < count; i += 4 {
|
||||
r := rnd.Uint32()
|
||||
buf[i] = byte(r)
|
||||
|
|
138
vendor/src/github.com/spf13/cobra/README.md
vendored
|
@ -8,7 +8,7 @@ Many of the most widely used Go projects are built using Cobra including:
|
|||
* [Hugo](http://gohugo.io)
|
||||
* [rkt](https://github.com/coreos/rkt)
|
||||
* [etcd](https://github.com/coreos/etcd)
|
||||
* [Docker](https://github.com/docker/docker)
|
||||
* [Moby (former Docker)](https://github.com/moby/moby)
|
||||
* [Docker (distribution)](https://github.com/docker/distribution)
|
||||
* [OpenShift](https://www.openshift.com/)
|
||||
* [Delve](https://github.com/derekparker/delve)
|
||||
|
@ -16,9 +16,9 @@ Many of the most widely used Go projects are built using Cobra including:
|
|||
* [CockroachDB](http://www.cockroachlabs.com/)
|
||||
* [Bleve](http://www.blevesearch.com/)
|
||||
* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
|
||||
* [Parse (CLI)](https://parse.com/)
|
||||
* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
|
||||
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
|
||||
* [rclone](http://rclone.org/)
|
||||
|
||||
|
||||
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
|
||||
|
@ -40,7 +40,7 @@ Cobra provides:
|
|||
* Fully POSIX-compliant flags (including short & long versions)
|
||||
* Nested subcommands
|
||||
* Global, local and cascading flags
|
||||
* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname`
|
||||
* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
|
||||
* Intelligent suggestions (`app srver`... did you mean `app server`?)
|
||||
* Automatic help generation for commands and flags
|
||||
* Automatic detailed help for `app help [command]`
|
||||
|
@ -48,7 +48,7 @@ Cobra provides:
|
|||
* Automatically generated bash autocomplete for your application
|
||||
* Automatically generated man pages for your application
|
||||
* Command aliases so you can change things without breaking them
|
||||
* The flexibilty to define your own help, usage, etc.
|
||||
* The flexibility to define your own help, usage, etc.
|
||||
* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
|
||||
|
||||
Cobra has an exceptionally clean interface and simple design without needless
|
||||
|
@ -79,11 +79,11 @@ A few good real world examples may better illustrate this point.
|
|||
|
||||
In the following example, 'server' is a command, and 'port' is a flag:
|
||||
|
||||
> hugo server --port=1313
|
||||
hugo server --port=1313
|
||||
|
||||
In this command we are telling Git to clone the url bare.
|
||||
|
||||
> git clone URL --bare
|
||||
git clone URL --bare
|
||||
|
||||
## Commands
|
||||
|
||||
|
@ -114,7 +114,7 @@ and flags that are only available to that command.
|
|||
In the example above, 'port' is the flag.
|
||||
|
||||
Flag functionality is provided by the [pflag
|
||||
library](https://github.com/ogier/pflag), a fork of the flag standard library
|
||||
library](https://github.com/spf13/pflag), a fork of the flag standard library
|
||||
which maintains the same interface while adding POSIX compliance.
|
||||
|
||||
## Usage
|
||||
|
@ -127,10 +127,10 @@ tree is assigned to the commander which is finally executed.
|
|||
|
||||
# Installing
|
||||
Using Cobra is easy. First, use `go get` to install the latest version
|
||||
of the library. This command will install the `cobra` generator executible
|
||||
along with the library:
|
||||
of the library. This command will install the `cobra` generator executable
|
||||
along with the library and its dependencies:
|
||||
|
||||
> go get -v github.com/spf13/cobra/cobra
|
||||
go get -u github.com/spf13/cobra/cobra
|
||||
|
||||
Next, include Cobra in your application:
|
||||
|
||||
|
@ -140,8 +140,8 @@ import "github.com/spf13/cobra"
|
|||
|
||||
# Getting Started
|
||||
|
||||
While you are welcome to provide your own organization, typically a Cobra based
|
||||
application will follow the following organizational structure.
|
||||
While you are welcome to provide your own organization, typically a Cobra-based
|
||||
application will follow the following organizational structure:
|
||||
|
||||
```
|
||||
▾ appName/
|
||||
|
@ -153,7 +153,7 @@ application will follow the following organizational structure.
|
|||
main.go
|
||||
```
|
||||
|
||||
In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra.
|
||||
In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
@ -180,7 +180,7 @@ commands you want. It's the easiest way to incorporate Cobra into your applicati
|
|||
|
||||
In order to use the cobra command, compile it using the following command:
|
||||
|
||||
> go install github.com/spf13/cobra/cobra
|
||||
go get github.com/spf13/cobra/cobra
|
||||
|
||||
This will create the cobra executable under your `$GOPATH/bin` directory.
|
||||
|
||||
|
@ -215,7 +215,12 @@ cobra add config
|
|||
cobra add create -p 'configCmd'
|
||||
```
|
||||
|
||||
Once you have run these three commands you would have an app structure that would look like:
|
||||
*Note: Use camelCase (not snake_case/snake-case) for command names.
|
||||
Otherwise, you will encounter errors.
|
||||
For example, `cobra add add-user` is incorrect, but `cobra add addUser` is valid.*
|
||||
|
||||
Once you have run these three commands you would have an app structure similar to
|
||||
the following:
|
||||
|
||||
```
|
||||
▾ app/
|
||||
|
@ -226,16 +231,16 @@ Once you have run these three commands you would have an app structure that woul
|
|||
main.go
|
||||
```
|
||||
|
||||
at this point you can run `go run main.go` and it would run your app. `go run
|
||||
At this point you can run `go run main.go` and it would run your app. `go run
|
||||
main.go serve`, `go run main.go config`, `go run main.go config create` along
|
||||
with `go run main.go help serve`, etc would all work.
|
||||
with `go run main.go help serve`, etc. would all work.
|
||||
|
||||
Obviously you haven't added your own code to these yet, the commands are ready
|
||||
for you to give them their tasks. Have fun.
|
||||
Obviously you haven't added your own code to these yet. The commands are ready
|
||||
for you to give them their tasks. Have fun!
|
||||
|
||||
### Configuring the cobra generator
|
||||
|
||||
The cobra generator will be easier to use if you provide a simple configuration
|
||||
The Cobra generator will be easier to use if you provide a simple configuration
|
||||
file which will help you eliminate providing a bunch of repeated information in
|
||||
flags over and over.
|
||||
|
||||
|
@ -260,16 +265,18 @@ license:
|
|||
master my life.
|
||||
```
|
||||
|
||||
You can also use built-in licenses. For example, **GPLv2**, **GPLv3**, **LGPL**,
|
||||
**AGPL**, **MIT**, **2-Clause BSD** or **3-Clause BSD**.
|
||||
|
||||
## Manually implementing Cobra
|
||||
|
||||
To manually implement cobra you need to create a bare main.go file and a RootCmd file.
|
||||
To manually implement Cobra you need to create a bare main.go file and a RootCmd file.
|
||||
You will optionally provide additional commands as you see fit.
|
||||
|
||||
### Create the root command
|
||||
|
||||
The root command represents your binary itself.
|
||||
|
||||
|
||||
#### Manually create rootCmd
|
||||
|
||||
Cobra doesn't require any special constructors. Simply create your commands.
|
||||
|
@ -291,9 +298,18 @@ var RootCmd = &cobra.Command{
|
|||
|
||||
You will additionally define flags and handle configuration in your init() function.
|
||||
|
||||
for example cmd/root.go:
|
||||
For example cmd/root.go:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
|
||||
|
@ -307,6 +323,34 @@ func init() {
|
|||
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
|
||||
viper.SetDefault("license", "apache")
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
RootCmd.Execute()
|
||||
}
|
||||
|
||||
func initConfig() {
|
||||
// Don't forget to read config either from cfgFile or from home directory!
|
||||
if cfgFile != "" {
|
||||
// Use config file from the flag.
|
||||
viper.SetConfigFile(cfgFile)
|
||||
} else {
|
||||
// Find home directory.
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Search config in home directory with name ".cobra" (without extension).
|
||||
viper.AddConfigPath(home)
|
||||
viper.SetConfigName(".cobra")
|
||||
}
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
fmt.Println("Can't read config:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Create your main.go
|
||||
|
@ -334,7 +378,6 @@ func main() {
|
|||
}
|
||||
```
|
||||
|
||||
|
||||
### Create additional commands
|
||||
|
||||
Additional commands can be defined and typically are each given their own file
|
||||
|
@ -424,6 +467,23 @@ A flag can also be assigned locally which will only apply to that specific comma
|
|||
RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
|
||||
```
|
||||
|
||||
### Bind Flags with Config
|
||||
|
||||
You can also bind your flags with [viper](https://github.com/spf13/viper):
|
||||
```go
|
||||
var author string
|
||||
|
||||
func init() {
|
||||
RootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
|
||||
viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
|
||||
}
|
||||
```
|
||||
|
||||
In this example the persistent flag `author` is bound with `viper`.
|
||||
**Note**, that the variable `author` will not be set to the value from config,
|
||||
when the `--author` flag is not provided by user.
|
||||
|
||||
More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
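Because the binding feeds the flag value into viper rather than back into the Go variable, reading the setting through viper is the dependable path when the flag is absent. A hedged illustration; the command body below is hypothetical:

```go
Run: func(cmd *cobra.Command, args []string) {
	// author (the Go variable) only changes when --author is passed;
	// viper.GetString also falls back to the config file or the default.
	fmt.Println("author:", viper.GetString("author"))
},
```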
|
||||
|
||||
## Example
|
||||
|
||||
|
@ -853,39 +913,12 @@ out everything Cobra knows about the flags for each command.
|
|||
command.DebugFlags()
|
||||
```
|
||||
|
||||
## Release Notes
|
||||
* **0.9.0** June 17, 2014
|
||||
* flags can appears anywhere in the args (provided they are unambiguous)
|
||||
* --help prints usage screen for app or command
|
||||
* Prefix matching for commands
|
||||
* Cleaner looking help and usage output
|
||||
* Extensive test suite
|
||||
* **0.8.0** Nov 5, 2013
|
||||
* Reworked interface to remove commander completely
|
||||
* Command now primary structure
|
||||
* No initialization needed
|
||||
* Usage & Help templates & functions definable at any level
|
||||
* Updated Readme
|
||||
* **0.7.0** Sept 24, 2013
|
||||
* Needs more eyes
|
||||
* Test suite
|
||||
* Support for automatic error messages
|
||||
* Support for help command
|
||||
* Support for printing to any io.Writer instead of os.Stderr
|
||||
* Support for persistent flags which cascade down tree
|
||||
* Ready for integration into Hugo
|
||||
* **0.1.0** Sept 3, 2013
|
||||
* Implement first draft
|
||||
|
||||
## Extensions
|
||||
|
||||
Libraries for extending Cobra:
|
||||
|
||||
* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`.
|
||||
|
||||
## ToDo
|
||||
* Launch proper documentation site
|
||||
|
||||
## Contributing
|
||||
|
||||
1. Fork it
|
||||
|
@ -905,6 +938,3 @@ Names in no particular order:
|
|||
## License
|
||||
|
||||
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
|
||||
|
||||
|
||||
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package cobra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -18,12 +19,9 @@ const (
|
|||
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
|
||||
)
|
||||
|
||||
func preamble(out io.Writer, name string) error {
|
||||
_, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
preamStr := `
|
||||
func writePreamble(buf *bytes.Buffer, name string) {
|
||||
buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
|
||||
buf.WriteString(`
|
||||
__debug()
|
||||
{
|
||||
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
|
||||
|
@ -134,7 +132,10 @@ __handle_reply()
|
|||
declare -F __custom_func >/dev/null && __custom_func
|
||||
fi
|
||||
|
||||
# available in bash-completion >= 2, not always present on macOS
|
||||
if declare -F __ltrim_colon_completions >/dev/null; then
|
||||
__ltrim_colon_completions "$cur"
|
||||
fi
|
||||
}
|
||||
|
||||
# The arguments should be in the form "ext1|ext2|extn"
|
||||
|
@ -247,18 +248,13 @@ __handle_word()
|
|||
__handle_word
|
||||
}
|
||||
|
||||
`
|
||||
_, err = fmt.Fprint(out, preamStr)
|
||||
return err
|
||||
`)
|
||||
}
|
||||
|
||||
func postscript(w io.Writer, name string) error {
|
||||
func writePostscript(buf *bytes.Buffer, name string) {
|
||||
name = strings.Replace(name, ":", "__", -1)
|
||||
_, err := fmt.Fprintf(w, "__start_%s()\n", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintf(w, `{
|
||||
buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
|
||||
buf.WriteString(fmt.Sprintf(`{
|
||||
local cur prev words cword
|
||||
declare -A flaghash 2>/dev/null || :
|
||||
if declare -F _init_completion >/dev/null 2>&1; then
|
||||
|
@ -282,197 +278,132 @@ func postscript(w io.Writer, name string) error {
|
|||
__handle_word
|
||||
}
|
||||
|
||||
`, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then
|
||||
`, name))
|
||||
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
|
||||
complete -o default -F __start_%s %s
|
||||
else
|
||||
complete -o default -o nospace -F __start_%s %s
|
||||
fi
|
||||
|
||||
`, name, name, name, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n")
|
||||
return err
|
||||
`, name, name, name, name))
|
||||
buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
|
||||
}
|
||||
|
||||
func writeCommands(cmd *Command, w io.Writer) error {
|
||||
if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
func writeCommands(buf *bytes.Buffer, cmd *Command) {
|
||||
buf.WriteString(" commands=()\n")
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c == cmd.helpCommand {
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil {
|
||||
return err
|
||||
buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
|
||||
}
|
||||
}
|
||||
_, err := fmt.Fprintf(w, "\n")
|
||||
return err
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error {
|
||||
func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string) {
|
||||
for key, value := range annotations {
|
||||
switch key {
|
||||
case BashCompFilenameExt:
|
||||
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||
|
||||
var ext string
|
||||
if len(value) > 0 {
|
||||
ext := "__handle_filename_extension_flag " + strings.Join(value, "|")
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
|
||||
ext = "__handle_filename_extension_flag " + strings.Join(value, "|")
|
||||
} else {
|
||||
ext := "_filedir"
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
ext = "_filedir"
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
|
||||
case BashCompCustom:
|
||||
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||
if len(value) > 0 {
|
||||
handlers := strings.Join(value, "; ")
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers)
|
||||
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(:)\n")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
buf.WriteString(" flags_completion+=(:)\n")
|
||||
}
|
||||
case BashCompSubdirsInDir:
|
||||
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
|
||||
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||
|
||||
var ext string
|
||||
if len(value) == 1 {
|
||||
ext := "__handle_subdirs_in_dir_flag " + value[0]
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
|
||||
ext = "__handle_subdirs_in_dir_flag " + value[0]
|
||||
} else {
|
||||
ext := "_filedir -d"
|
||||
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
|
||||
ext = "_filedir -d"
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeShortFlag(flag *pflag.Flag, w io.Writer) error {
|
||||
b := (len(flag.NoOptDefVal) > 0)
|
||||
func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) {
|
||||
name := flag.Shorthand
|
||||
format := " "
|
||||
if !b {
|
||||
if len(flag.NoOptDefVal) == 0 {
|
||||
format += "two_word_"
|
||||
}
|
||||
format += "flags+=(\"-%s\")\n"
|
||||
if _, err := fmt.Fprintf(w, format, name); err != nil {
|
||||
return err
|
||||
}
|
||||
return writeFlagHandler("-"+name, flag.Annotations, w)
|
||||
buf.WriteString(fmt.Sprintf(format, name))
|
||||
writeFlagHandler(buf, "-"+name, flag.Annotations)
|
||||
}
|
||||
|
||||
func writeFlag(flag *pflag.Flag, w io.Writer) error {
|
||||
b := (len(flag.NoOptDefVal) > 0)
|
||||
func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) {
|
||||
name := flag.Name
|
||||
format := " flags+=(\"--%s"
|
||||
if !b {
|
||||
if len(flag.NoOptDefVal) == 0 {
|
||||
format += "="
|
||||
}
|
||||
format += "\")\n"
|
||||
if _, err := fmt.Fprintf(w, format, name); err != nil {
|
||||
return err
|
||||
}
|
||||
return writeFlagHandler("--"+name, flag.Annotations, w)
|
||||
buf.WriteString(fmt.Sprintf(format, name))
|
||||
writeFlagHandler(buf, "--"+name, flag.Annotations)
|
||||
}
|
||||
|
||||
func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error {
|
||||
b := (len(flag.NoOptDefVal) > 0)
|
||||
func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
|
||||
name := flag.Name
|
||||
format := " local_nonpersistent_flags+=(\"--%s"
|
||||
if !b {
|
||||
if len(flag.NoOptDefVal) == 0 {
|
||||
format += "="
|
||||
}
|
||||
format += "\")\n"
|
||||
_, err := fmt.Fprintf(w, format, name)
|
||||
return err
|
||||
buf.WriteString(fmt.Sprintf(format, name))
|
||||
}
|
||||
|
||||
func writeFlags(cmd *Command, w io.Writer) error {
|
||||
_, err := fmt.Fprintf(w, ` flags=()
|
||||
func writeFlags(buf *bytes.Buffer, cmd *Command) {
|
||||
buf.WriteString(` flags=()
|
||||
two_word_flags=()
|
||||
local_nonpersistent_flags=()
|
||||
flags_with_completion=()
|
||||
flags_completion=()
|
||||
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
|
||||
var visitErr error
|
||||
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||
if nonCompletableFlag(flag) {
|
||||
return
|
||||
}
|
||||
if err := writeFlag(flag, w); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
writeFlag(buf, flag)
|
||||
if len(flag.Shorthand) > 0 {
|
||||
if err := writeShortFlag(flag, w); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
writeShortFlag(buf, flag)
|
||||
}
|
||||
if localNonPersistentFlags.Lookup(flag.Name) != nil {
|
||||
if err := writeLocalNonPersistentFlag(flag, w); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
writeLocalNonPersistentFlag(buf, flag)
|
||||
}
|
||||
})
|
||||
if visitErr != nil {
|
||||
return visitErr
|
||||
}
|
||||
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||
if nonCompletableFlag(flag) {
|
||||
return
|
||||
}
|
||||
if err := writeFlag(flag, w); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
writeFlag(buf, flag)
|
||||
if len(flag.Shorthand) > 0 {
|
||||
if err := writeShortFlag(flag, w); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
writeShortFlag(buf, flag)
|
||||
}
|
||||
})
|
||||
if visitErr != nil {
|
||||
return visitErr
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(w, "\n")
|
||||
return err
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
func writeRequiredFlag(cmd *Command, w io.Writer) error {
|
||||
if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
|
||||
buf.WriteString(" must_have_one_flag=()\n")
|
||||
flags := cmd.NonInheritedFlags()
|
||||
var visitErr error
|
||||
flags.VisitAll(func(flag *pflag.Flag) {
|
||||
if nonCompletableFlag(flag) {
|
||||
return
|
||||
|
@ -481,107 +412,68 @@ func writeRequiredFlag(cmd *Command, w io.Writer) error {
|
|||
switch key {
|
||||
case BashCompOneRequiredFlag:
|
||||
format := " must_have_one_flag+=(\"--%s"
|
||||
b := (flag.Value.Type() == "bool")
|
||||
if !b {
|
||||
if flag.Value.Type() != "bool" {
|
||||
format += "="
|
||||
}
|
||||
format += "\")\n"
|
||||
if _, err := fmt.Fprintf(w, format, flag.Name); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(format, flag.Name))
|
||||
|
||||
if len(flag.Shorthand) > 0 {
|
||||
if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil {
|
||||
visitErr = err
|
||||
return
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
return visitErr
|
||||
}
|
||||
|
||||
func writeRequiredNouns(cmd *Command, w io.Writer) error {
|
||||
if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
|
||||
buf.WriteString(" must_have_one_noun=()\n")
|
||||
sort.Sort(sort.StringSlice(cmd.ValidArgs))
|
||||
for _, value := range cmd.ValidArgs {
|
||||
if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil {
|
||||
return err
|
||||
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeArgAliases(cmd *Command, w io.Writer) error {
|
||||
if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
|
||||
buf.WriteString(" noun_aliases=()\n")
|
||||
sort.Sort(sort.StringSlice(cmd.ArgAliases))
|
||||
for _, value := range cmd.ArgAliases {
|
||||
if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil {
|
||||
return err
|
||||
buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func gen(cmd *Command, w io.Writer) error {
|
||||
func gen(buf *bytes.Buffer, cmd *Command) {
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c == cmd.helpCommand {
|
||||
continue
|
||||
}
|
||||
if err := gen(c, w); err != nil {
|
||||
return err
|
||||
}
|
||||
gen(buf, c)
|
||||
}
|
||||
commandName := cmd.CommandPath()
|
||||
commandName = strings.Replace(commandName, " ", "_", -1)
|
||||
commandName = strings.Replace(commandName, ":", "__", -1)
|
||||
if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeCommands(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeFlags(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeRequiredFlag(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeRequiredNouns(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeArgAliases(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "}\n\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
|
||||
buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
|
||||
writeCommands(buf, cmd)
|
||||
writeFlags(buf, cmd)
|
||||
writeRequiredFlag(buf, cmd)
|
||||
writeRequiredNouns(buf, cmd)
|
||||
writeArgAliases(buf, cmd)
|
||||
buf.WriteString("}\n\n")
|
||||
}
|
||||
|
||||
// GenBashCompletion generates bash completion file and writes to the passed writer.
|
||||
func (cmd *Command) GenBashCompletion(w io.Writer) error {
|
||||
if err := preamble(w, cmd.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
writePreamble(buf, cmd.Name())
|
||||
if len(cmd.BashCompletionFunction) > 0 {
|
||||
if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil {
|
||||
buf.WriteString(cmd.BashCompletionFunction + "\n")
|
||||
}
|
||||
gen(buf, cmd)
|
||||
writePostscript(buf, cmd.Name())
|
||||
|
||||
_, err := buf.WriteTo(w)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := gen(cmd, w); err != nil {
|
||||
return err
|
||||
}
|
||||
return postscript(w, cmd.Name())
|
||||
}
|
||||
|
||||
func nonCompletableFlag(flag *pflag.Flag) bool {
|
||||
|
|
|
@ -106,7 +106,7 @@ node pod replicationcontroller service
|
|||
|
||||
If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
|
||||
|
||||
```go`
|
||||
```go
|
||||
argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
|
||||
|
||||
cmd := &cobra.Command{
|
||||
|
|
|
@ -2,16 +2,12 @@ package cobra
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
var _ = os.Stderr
|
||||
|
||||
func checkOmit(t *testing.T, found, unexpected string) {
|
||||
if strings.Contains(found, unexpected) {
|
||||
t.Errorf("Unexpected response.\nGot: %q\nBut should not have!\n", unexpected)
|
||||
|
@ -178,3 +174,19 @@ func TestBashCompletionDeprecatedFlag(t *testing.T) {
|
|||
t.Errorf("expected completion to not include %q flag: Got %v", flagName, bashCompletion)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBashCompletion(b *testing.B) {
|
||||
c := initializeWithRootCmd()
|
||||
cmdEcho.AddCommand(cmdTimes)
|
||||
c.AddCommand(cmdEcho, cmdPrint, cmdDeprecated, cmdColon)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
if err := c.GenBashCompletion(buf); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
16
vendor/src/github.com/spf13/cobra/cobra.go
vendored
|
@ -29,6 +29,7 @@ import (
|
|||
var templateFuncs = template.FuncMap{
|
||||
"trim": strings.TrimSpace,
|
||||
"trimRightSpace": trimRightSpace,
|
||||
"trimTrailingWhitespaces": trimRightSpace,
|
||||
"appendIfNotPresent": appendIfNotPresent,
|
||||
"rpad": rpad,
|
||||
"gt": Gt,
|
||||
|
@ -46,6 +47,15 @@ var EnablePrefixMatching = false
|
|||
// To disable sorting, set it to false.
|
||||
var EnableCommandSorting = true
|
||||
|
||||
// MousetrapHelpText enables an information splash screen on Windows
|
||||
// if the CLI is started from explorer.exe.
|
||||
// To disable the mousetrap, just set this variable to blank string ("").
|
||||
// Works only on Microsoft Windows.
|
||||
var MousetrapHelpText string = `This is a command line tool.
|
||||
|
||||
You need to open cmd.exe and run it from there.
|
||||
`
|
||||
|
||||
// AddTemplateFunc adds a template function that's available to Usage and Help
|
||||
// template generation.
|
||||
func AddTemplateFunc(name string, tmplFunc interface{}) {
|
||||
|
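Outside the diff itself: given the `MousetrapHelpText` variable introduced in the hunk above, a short sketch of how an application might disable the Windows-only splash screen. The `app` command name is made up; only the assignment to `cobra.MousetrapHelpText` is taken from the vendored code.

```go
package main

import (
	"github.com/spf13/cobra"
)

func main() {
	// Disable the Windows-only "mousetrap" splash screen; per the comment in
	// the hunk above, an empty string turns it off entirely. On non-Windows
	// platforms this assignment is harmless.
	cobra.MousetrapHelpText = ""

	rootCmd := &cobra.Command{Use: "app"}
	_ = rootCmd.Execute()
}
```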
@ -65,6 +75,8 @@ func OnInitialize(y ...func()) {
|
|||
initializers = append(initializers, y...)
|
||||
}
|
||||
|
||||
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
|
||||
|
||||
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
|
||||
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
|
||||
// ints and then compared.
|
||||
|
@ -95,6 +107,8 @@ func Gt(a interface{}, b interface{}) bool {
|
|||
return left > right
|
||||
}
|
||||
|
||||
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
|
||||
|
||||
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
|
||||
func Eq(a interface{}, b interface{}) bool {
|
||||
av := reflect.ValueOf(a)
|
||||
|
@ -115,6 +129,8 @@ func trimRightSpace(s string) string {
|
|||
return strings.TrimRightFunc(s, unicode.IsSpace)
|
||||
}
|
||||
|
||||
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
|
||||
|
||||
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
|
||||
func appendIfNotPresent(s, stringToAppend string) string {
|
||||
if strings.Contains(s, stringToAppend) {
|
||||
|
|
127
vendor/src/github.com/spf13/cobra/cobra/cmd/add.go
vendored
|
@ -15,20 +15,20 @@ package cmd
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(addCmd)
|
||||
addCmd.Flags().StringVarP(&packageName, "package", "t", "", "target package name (e.g. github.com/spf13/hugo)")
|
||||
addCmd.Flags().StringVarP(&parentName, "parent", "p", "RootCmd", "name of parent command for this command")
|
||||
}
|
||||
|
||||
var pName string
|
||||
var packageName, parentName string
|
||||
|
||||
// initialize Command
|
||||
var addCmd = &cobra.Command{
|
||||
Use: "add [command name]",
|
||||
Aliases: []string{"command"},
|
||||
|
@ -40,37 +40,90 @@ and register it to its parent (default RootCmd).
|
|||
If you want your command to be public, pass in the command name
|
||||
with an initial uppercase letter.
|
||||
|
||||
Example: cobra add server -> resulting in a new cmd/server.go
|
||||
`,
|
||||
Example: cobra add server -> resulting in a new cmd/server.go`,
|
||||
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) != 1 {
|
||||
if len(args) < 1 {
|
||||
er("add needs a name for the command")
|
||||
}
|
||||
guessProjectPath()
|
||||
createCmdFile(args[0])
|
||||
|
||||
var project *Project
|
||||
if packageName != "" {
|
||||
project = NewProject(packageName)
|
||||
} else {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
project = NewProjectFromPath(wd)
|
||||
}
|
||||
|
||||
cmdName := validateCmdName(args[0])
|
||||
cmdPath := filepath.Join(project.CmdPath(), cmdName+".go")
|
||||
createCmdFile(project.License(), cmdPath, cmdName)
|
||||
|
||||
fmt.Fprintln(cmd.OutOrStdout(), cmdName, "created at", cmdPath)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
addCmd.Flags().StringVarP(&pName, "parent", "p", "RootCmd", "name of parent command for this command")
|
||||
}
|
||||
// validateCmdName returns source without any dashes and underscores.
|
||||
// If a dash or underscore is found, the next letter is uppercased.
|
||||
// It supports only ASCII (1-byte character) strings.
|
||||
// https://github.com/spf13/cobra/issues/269
|
||||
func validateCmdName(source string) string {
|
||||
i := 0
|
||||
l := len(source)
|
||||
// The output is initialized on demand, when the first dash or underscore
|
||||
// occurs.
|
||||
var output string
|
||||
|
||||
func parentName() string {
|
||||
if !strings.HasSuffix(strings.ToLower(pName), "cmd") {
|
||||
return pName + "Cmd"
|
||||
for i < l {
|
||||
if source[i] == '-' || source[i] == '_' {
|
||||
if output == "" {
|
||||
output = source[:i]
|
||||
}
|
||||
|
||||
return pName
|
||||
// If it's the last rune and it's a dash or underscore,
|
||||
// don't add it to the output and break the loop.
|
||||
if i == l-1 {
|
||||
break
|
||||
}
|
||||
|
||||
// If next character is dash or underscore,
|
||||
// just skip the current character.
|
||||
if source[i+1] == '-' || source[i+1] == '_' {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
// If the current character is dash or underscore,
|
||||
// upper next letter and add to output.
|
||||
output += string(unicode.ToUpper(rune(source[i+1])))
|
||||
// We know that source[i] is a dash or underscore and source[i+1] has
|
||||
// been uppercased, so advance i by 2.
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
|
||||
// If the current character isn't dash or underscore,
|
||||
// just add it.
|
||||
if output != "" {
|
||||
output += string(source[i])
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if output == "" {
|
||||
return source // source is initially valid name.
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func createCmdFile(cmdName string) {
|
||||
lic := getLicense()
|
||||
func createCmdFile(license License, path, cmdName string) {
|
||||
template := `{{comment .copyright}}
|
||||
{{comment .license}}
|
||||
|
||||
template := `{{ comment .copyright }}
|
||||
{{ comment .license }}
|
||||
|
||||
package cmd
|
||||
package {{.cmdPackage}}
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -79,8 +132,8 @@ import (
|
|||
)
|
||||
|
||||
// {{.cmdName}}Cmd represents the {{.cmdName}} command
|
||||
var {{ .cmdName }}Cmd = &cobra.Command{
|
||||
Use: "{{ .cmdName }}",
|
||||
var {{.cmdName}}Cmd = &cobra.Command{
|
||||
Use: "{{.cmdName}}",
|
||||
Short: "A brief description of your command",
|
||||
Long: ` + "`" + `A longer description that spans multiple lines and likely contains examples
|
||||
and usage of using your command. For example:
|
||||
|
@ -89,13 +142,12 @@ Cobra is a CLI library for Go that empowers applications.
|
|||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.` + "`" + `,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
// TODO: Work your own magic here
|
||||
fmt.Println("{{ .cmdName }} called")
|
||||
fmt.Println("{{.cmdName}} called")
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
{{ .parentName }}.AddCommand({{ .cmdName }}Cmd)
|
||||
{{.parentName}}.AddCommand({{.cmdName}}Cmd)
|
||||
|
||||
// Here you will define your flags and configuration settings.
|
||||
|
||||
|
@ -106,23 +158,22 @@ func init() {
|
|||
// Cobra supports local flags which will only run when this command
|
||||
// is called directly, e.g.:
|
||||
// {{.cmdName}}Cmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
|
||||
}
|
||||
`
|
||||
|
||||
var data map[string]interface{}
|
||||
data = make(map[string]interface{})
|
||||
|
||||
data := make(map[string]interface{})
|
||||
data["copyright"] = copyrightLine()
|
||||
data["license"] = lic.Header
|
||||
data["appName"] = projectName()
|
||||
data["viper"] = viper.GetBool("useViper")
|
||||
data["parentName"] = parentName()
|
||||
data["license"] = license.Header
|
||||
data["cmdPackage"] = filepath.Base(filepath.Dir(path)) // last dir of path
|
||||
data["parentName"] = parentName
|
||||
data["cmdName"] = cmdName
|
||||
|
||||
err := writeTemplateToFile(filepath.Join(ProjectPath(), guessCmdDir()), cmdName+".go", template, data)
|
||||
cmdScript, err := executeTemplate(template, data)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
err = writeStringToFile(path, cmdScript)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
fmt.Println(cmdName, "created at", filepath.Join(ProjectPath(), guessCmdDir(), cmdName+".go"))
|
||||
}
|
||||
|
|
100
vendor/src/github.com/spf13/cobra/cobra/cmd/add_test.go
vendored
Normal file
|
@ -0,0 +1,100 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGoldenAddCmd initializes the project "github.com/spf13/testproject"
|
||||
// in GOPATH, adds "test" command
|
||||
// and compares the content of all files in cmd directory of testproject
|
||||
// with appropriate golden files.
|
||||
// Use -update to update existing golden files.
|
||||
func TestGoldenAddCmd(t *testing.T) {
|
||||
projectName := "github.com/spf13/testproject"
|
||||
project := NewProject(projectName)
|
||||
|
||||
// Initialize the project at first.
|
||||
initializeProject(project)
|
||||
defer os.RemoveAll(project.AbsPath())
|
||||
|
||||
// Then add the "test" command.
|
||||
cmdName := "test"
|
||||
cmdPath := filepath.Join(project.CmdPath(), cmdName+".go")
|
||||
createCmdFile(project.License(), cmdPath, cmdName)
|
||||
|
||||
expectedFiles := []string{".", "root.go", "test.go"}
|
||||
gotFiles := []string{}
|
||||
|
||||
// Check project file hierarchy and compare the content of every single file
|
||||
// with appropriate golden file.
|
||||
err := filepath.Walk(project.CmdPath(), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make path relative to project.CmdPath().
|
||||
// E.g. path = "/home/user/go/src/github.com/spf13/testproject/cmd/root.go"
|
||||
// then it returns just "root.go".
|
||||
relPath, err := filepath.Rel(project.CmdPath(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relPath = filepath.ToSlash(relPath)
|
||||
gotFiles = append(gotFiles, relPath)
|
||||
goldenPath := filepath.Join("testdata", filepath.Base(path)+".golden")
|
||||
|
||||
switch relPath {
|
||||
// Known directories.
|
||||
case ".":
|
||||
return nil
|
||||
// Known files.
|
||||
case "root.go", "test.go":
|
||||
if *update {
|
||||
got, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ioutil.WriteFile(goldenPath, got, 0644)
|
||||
}
|
||||
return compareFiles(path, goldenPath)
|
||||
}
|
||||
// Unknown file.
|
||||
return errors.New("unknown file: " + path)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that no expected files are missing.
|
||||
if err := checkLackFiles(expectedFiles, gotFiles); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCmdName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"cmdName", "cmdName"},
|
||||
{"cmd_name", "cmdName"},
|
||||
{"cmd-name", "cmdName"},
|
||||
{"cmd______Name", "cmdName"},
|
||||
{"cmd------Name", "cmdName"},
|
||||
{"cmd______name", "cmdName"},
|
||||
{"cmd------name", "cmdName"},
|
||||
{"cmdName-----", "cmdName"},
|
||||
{"cmdname-", "cmdname"},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
got := validateCmdName(testCase.input)
|
||||
if testCase.expected != got {
|
||||
t.Errorf("Expected %q, got %q", testCase.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
77
vendor/src/github.com/spf13/cobra/cobra/cmd/golden_test.go
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
var update = flag.Bool("update", false, "update .golden files")
|
||||
|
||||
func init() {
|
||||
// Mute commands.
|
||||
addCmd.SetOutput(new(bytes.Buffer))
|
||||
initCmd.SetOutput(new(bytes.Buffer))
|
||||
}
|
||||
|
||||
// compareFiles compares the content of files with pathA and pathB.
|
||||
// If contents are equal, it returns nil.
|
||||
// If not, it returns which files are not equal
|
||||
// and diff (if system has diff command) between these files.
|
||||
func compareFiles(pathA, pathB string) error {
|
||||
contentA, err := ioutil.ReadFile(pathA)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contentB, err := ioutil.ReadFile(pathB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(contentA, contentB) {
|
||||
output := new(bytes.Buffer)
|
||||
output.WriteString(fmt.Sprintf("%q and %q are not equal!\n\n", pathA, pathB))
|
||||
|
||||
diffPath, err := exec.LookPath("diff")
|
||||
if err != nil {
|
||||
// Don't execute diff if it can't be found.
|
||||
return nil
|
||||
}
|
||||
diffCmd := exec.Command(diffPath, pathA, pathB)
|
||||
diffCmd.Stdout = output
|
||||
diffCmd.Stderr = output
|
||||
|
||||
output.WriteString("$ diff " + pathA + " " + pathB + "\n")
|
||||
if err := diffCmd.Run(); err != nil {
|
||||
output.WriteString("\n" + err.Error())
|
||||
}
|
||||
return errors.New(output.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkLackFiles checks if all elements of expected are in got.
|
||||
func checkLackFiles(expected, got []string) error {
|
||||
lacks := make([]string, 0, len(expected))
|
||||
for _, ev := range expected {
|
||||
if !stringInStringSlice(ev, got) {
|
||||
lacks = append(lacks, ev)
|
||||
}
|
||||
}
|
||||
if len(lacks) > 0 {
|
||||
return fmt.Errorf("Lack %v file(s): %v", len(lacks), lacks)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// stringInStringSlice checks if s is an element of slice.
|
||||
func stringInStringSlice(s string, slice []string) bool {
|
||||
for _, v := range slice {
|
||||
if s == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
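For readers unfamiliar with the golden-file tests added above: a minimal, self-contained sketch of the compare-or-update flow that `compareFiles` and the `-update` flag implement. Everything here is hypothetical (package name, `render`, the `testdata/render.golden` path); it illustrates the pattern only and is not part of the vendored code.

```go
package example

import (
	"bytes"
	"flag"
	"io/ioutil"
	"testing"
)

// -update rewrites the golden files instead of comparing against them,
// mirroring the flag declared in golden_test.go above.
var update = flag.Bool("update", false, "rewrite .golden files")

// render stands in for whatever output the real tests generate
// (e.g. the files written by `cobra init` / `cobra add`).
func render() []byte { return []byte("hello golden\n") }

func TestRenderGolden(t *testing.T) {
	goldenPath := "testdata/render.golden"
	got := render()

	if *update {
		if err := ioutil.WriteFile(goldenPath, got, 0644); err != nil {
			t.Fatal(err)
		}
	}

	want, err := ioutil.ReadFile(goldenPath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("output differs from %s:\ngot:  %q\nwant: %q", goldenPath, got, want)
	}
}
```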
@ -21,335 +21,119 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// var BaseDir = ""
|
||||
// var AppName = ""
|
||||
// var CommandDir = ""
|
||||
|
||||
var funcMap template.FuncMap
|
||||
var projectPath = ""
|
||||
var inputPath = ""
|
||||
var projectBase = ""
|
||||
|
||||
// for testing only
|
||||
var testWd = ""
|
||||
|
||||
var cmdDirs = []string{"cmd", "cmds", "command", "commands"}
|
||||
var cmdDirs = [...]string{"cmd", "cmds", "command", "commands"}
|
||||
var srcPaths []string
|
||||
|
||||
func init() {
|
||||
funcMap = template.FuncMap{
|
||||
"comment": commentifyString,
|
||||
// Initialize srcPaths.
|
||||
envGoPath := os.Getenv("GOPATH")
|
||||
goPaths := filepath.SplitList(envGoPath)
|
||||
if len(goPaths) == 0 {
|
||||
er("$GOPATH is not set")
|
||||
}
|
||||
srcPaths = make([]string, 0, len(goPaths))
|
||||
for _, goPath := range goPaths {
|
||||
srcPaths = append(srcPaths, filepath.Join(goPath, "src"))
|
||||
}
|
||||
}
|
||||
|
||||
func er(msg interface{}) {
|
||||
fmt.Println("Error:", msg)
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
||||
// Check if a file or directory exists.
|
||||
func exists(path string) (bool, error) {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func ProjectPath() string {
|
||||
if projectPath == "" {
|
||||
guessProjectPath()
|
||||
}
|
||||
|
||||
return projectPath
|
||||
}
|
||||
|
||||
// wrapper of the os package so we can test better
|
||||
func getWd() (string, error) {
|
||||
if testWd == "" {
|
||||
return os.Getwd()
|
||||
}
|
||||
return testWd, nil
|
||||
}
|
||||
|
||||
func guessCmdDir() string {
|
||||
guessProjectPath()
|
||||
if b, _ := isEmpty(projectPath); b {
|
||||
return "cmd"
|
||||
}
|
||||
|
||||
files, _ := filepath.Glob(projectPath + string(os.PathSeparator) + "c*")
|
||||
for _, f := range files {
|
||||
for _, c := range cmdDirs {
|
||||
if f == c {
|
||||
return c
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "cmd"
|
||||
}
|
||||
|
||||
func guessImportPath() string {
|
||||
guessProjectPath()
|
||||
|
||||
if !strings.HasPrefix(projectPath, getSrcPath()) {
|
||||
er("Cobra only supports project within $GOPATH")
|
||||
}
|
||||
|
||||
return filepath.ToSlash(filepath.Clean(strings.TrimPrefix(projectPath, getSrcPath())))
|
||||
}
|
||||
|
||||
func getSrcPath() string {
|
||||
return filepath.Join(os.Getenv("GOPATH"), "src") + string(os.PathSeparator)
|
||||
}
|
||||
|
||||
func projectName() string {
|
||||
return filepath.Base(ProjectPath())
|
||||
}
|
||||
|
||||
func guessProjectPath() {
|
||||
// if no path is provided... assume CWD.
|
||||
if inputPath == "" {
|
||||
x, err := getWd()
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
// inspect CWD
|
||||
base := filepath.Base(x)
|
||||
|
||||
// if we are in the cmd directory.. back up
|
||||
for _, c := range cmdDirs {
|
||||
if base == c {
|
||||
projectPath = filepath.Dir(x)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if projectPath == "" {
|
||||
projectPath = filepath.Clean(x)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
srcPath := getSrcPath()
|
||||
// if provided, inspect for logical locations
|
||||
if strings.ContainsRune(inputPath, os.PathSeparator) {
|
||||
if filepath.IsAbs(inputPath) || filepath.HasPrefix(inputPath, string(os.PathSeparator)) {
|
||||
// if Absolute, use it
|
||||
projectPath = filepath.Clean(inputPath)
|
||||
return
|
||||
}
|
||||
// If not absolute but contains slashes,
|
||||
// assuming it means create it from $GOPATH
|
||||
count := strings.Count(inputPath, string(os.PathSeparator))
|
||||
|
||||
switch count {
|
||||
// If only one directory deep, assume "github.com"
|
||||
case 1:
|
||||
projectPath = filepath.Join(srcPath, "github.com", inputPath)
|
||||
return
|
||||
case 2:
|
||||
projectPath = filepath.Join(srcPath, inputPath)
|
||||
return
|
||||
default:
|
||||
er("Unknown directory")
|
||||
}
|
||||
} else {
|
||||
// hardest case.. just a word.
|
||||
if projectBase == "" {
|
||||
x, err := getWd()
|
||||
if err == nil {
|
||||
projectPath = filepath.Join(x, inputPath)
|
||||
return
|
||||
}
|
||||
er(err)
|
||||
} else {
|
||||
projectPath = filepath.Join(srcPath, projectBase, inputPath)
|
||||
return
|
||||
}
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// isEmpty checks if a given path is empty.
|
||||
func isEmpty(path string) (bool, error) {
|
||||
if b, _ := exists(path); !b {
|
||||
return false, fmt.Errorf("%q path does not exist", path)
|
||||
}
|
||||
func isEmpty(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
er(err)
|
||||
}
|
||||
if fi.IsDir() {
|
||||
f, err := os.Open(path)
|
||||
// FIX: Resource leak - f.close() should be called here by defer or is missed
|
||||
// if the err != nil branch is taken.
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
dirs, err := f.Readdirnames(1)
|
||||
if err != nil && err != io.EOF {
|
||||
er(err)
|
||||
}
|
||||
list, _ := f.Readdir(-1)
|
||||
// f.Close() - see bug fix above
|
||||
return len(list) == 0, nil
|
||||
return len(dirs) == 0
|
||||
}
|
||||
return fi.Size() == 0, nil
|
||||
return fi.Size() == 0
|
||||
}
|
||||
|
||||
// isDir checks if a given path is a directory.
|
||||
func isDir(path string) (bool, error) {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
// exists checks if a file or directory exists.
|
||||
func exists(path string) bool {
|
||||
if path == "" {
|
||||
return false
|
||||
}
|
||||
return fi.IsDir(), nil
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
er(err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// dirExists checks if a path exists and is a directory.
|
||||
func dirExists(path string) (bool, error) {
|
||||
fi, err := os.Stat(path)
|
||||
if err == nil && fi.IsDir() {
|
||||
return true, nil
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func writeTemplateToFile(path string, file string, template string, data interface{}) error {
|
||||
filename := filepath.Join(path, file)
|
||||
|
||||
r, err := templateToReader(template, data)
|
||||
|
||||
func executeTemplate(tmplStr string, data interface{}) (string, error) {
|
||||
tmpl, err := template.New("").Funcs(template.FuncMap{"comment": commentifyString}).Parse(tmplStr)
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = safeWriteToDisk(filename, r)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeStringToFile(path, file, text string) error {
|
||||
filename := filepath.Join(path, file)
|
||||
|
||||
r := strings.NewReader(text)
|
||||
err := safeWriteToDisk(filename, r)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func templateToReader(tpl string, data interface{}) (io.Reader, error) {
|
||||
tmpl := template.New("")
|
||||
tmpl.Funcs(funcMap)
|
||||
tmpl, err := tmpl.Parse(tpl)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
err = tmpl.Execute(buf, data)
|
||||
|
||||
return buf, err
|
||||
return buf.String(), err
|
||||
}
|
||||
|
||||
// Same as WriteToDisk but checks to see if file/directory already exists.
|
||||
func safeWriteToDisk(inpath string, r io.Reader) (err error) {
|
||||
dir, _ := filepath.Split(inpath)
|
||||
ospath := filepath.FromSlash(dir)
|
||||
func writeStringToFile(path string, s string) error {
|
||||
return writeToFile(path, strings.NewReader(s))
|
||||
}
|
||||
|
||||
if ospath != "" {
|
||||
err = os.MkdirAll(ospath, 0777) // rwx, rw, r
|
||||
if err != nil {
|
||||
return
|
||||
// writeToFile writes r to file with path only
|
||||
// if file/directory on given path doesn't exist.
|
||||
// If a file or directory already exists at the given path,
|
||||
// it returns an error instead of overwriting it.
|
||||
func writeToFile(path string, r io.Reader) error {
|
||||
if exists(path) {
|
||||
return fmt.Errorf("%v already exists", path)
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
if dir != "" {
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ex, err := exists(inpath)
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if ex {
|
||||
return fmt.Errorf("%v already exists", inpath)
|
||||
}
|
||||
|
||||
file, err := os.Create(inpath)
|
||||
if err != nil {
|
||||
return
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
_, err = io.Copy(file, r)
|
||||
return
|
||||
}
|
||||
|
||||
func getLicense() License {
|
||||
l := whichLicense()
|
||||
if l != "" {
|
||||
if x, ok := Licenses[l]; ok {
|
||||
return x
|
||||
}
|
||||
}
|
||||
|
||||
return Licenses["apache"]
|
||||
}
|
||||
|
||||
func whichLicense() string {
|
||||
// if explicitly flagged, use that
|
||||
if userLicense != "" {
|
||||
return matchLicense(userLicense)
|
||||
}
|
||||
|
||||
// if already present in the project, use that
|
||||
// TODO: Inspect project for existing license
|
||||
|
||||
// default to viper's setting
|
||||
|
||||
if viper.IsSet("license.header") || viper.IsSet("license.text") {
|
||||
if custom, ok := Licenses["custom"]; ok {
|
||||
custom.Header = viper.GetString("license.header")
|
||||
custom.Text = viper.GetString("license.text")
|
||||
Licenses["custom"] = custom
|
||||
return "custom"
|
||||
}
|
||||
}
|
||||
|
||||
return matchLicense(viper.GetString("license"))
|
||||
}
|
||||
|
||||
func copyrightLine() string {
|
||||
author := viper.GetString("author")
|
||||
year := time.Now().Format("2006")
|
||||
|
||||
return "Copyright © " + year + " " + author
|
||||
return err
|
||||
}
|
||||
|
||||
// commentifyString comments every line of in.
|
||||
func commentifyString(in string) string {
|
||||
var newlines []string
|
||||
lines := strings.Split(in, "\n")
|
||||
for _, x := range lines {
|
||||
if !strings.HasPrefix(x, "//") {
|
||||
if x != "" {
|
||||
newlines = append(newlines, "// "+x)
|
||||
for _, line := range lines {
|
||||
if strings.HasPrefix(line, "//") {
|
||||
newlines = append(newlines, line)
|
||||
} else {
|
||||
if line == "" {
|
||||
newlines = append(newlines, "//")
|
||||
}
|
||||
} else {
|
||||
newlines = append(newlines, x)
|
||||
newlines = append(newlines, "// "+line)
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(newlines, "\n")
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var _ = fmt.Println
|
||||
var _ = os.Stderr
|
||||
|
||||
func checkGuess(t *testing.T, wd, input, expected string) {
|
||||
testWd = wd
|
||||
inputPath = input
|
||||
guessProjectPath()
|
||||
|
||||
if projectPath != expected {
|
||||
t.Errorf("Unexpected Project Path. \n Got: %q\nExpected: %q\n", projectPath, expected)
|
||||
}
|
||||
|
||||
reset()
|
||||
}
|
||||
|
||||
func reset() {
|
||||
testWd = ""
|
||||
inputPath = ""
|
||||
projectPath = ""
|
||||
}
|
||||
|
||||
func TestProjectPath(t *testing.T) {
|
||||
checkGuess(t, "", filepath.Join("github.com", "spf13", "hugo"), filepath.Join(getSrcPath(), "github.com", "spf13", "hugo"))
|
||||
checkGuess(t, "", filepath.Join("spf13", "hugo"), filepath.Join(getSrcPath(), "github.com", "spf13", "hugo"))
|
||||
checkGuess(t, "", filepath.Join("/", "bar", "foo"), filepath.Join("/", "bar", "foo"))
|
||||
checkGuess(t, "/bar/foo", "baz", filepath.Join("/", "bar", "foo", "baz"))
|
||||
checkGuess(t, "/bar/foo/cmd", "", filepath.Join("/", "bar", "foo"))
|
||||
checkGuess(t, "/bar/foo/command", "", filepath.Join("/", "bar", "foo"))
|
||||
checkGuess(t, "/bar/foo/commands", "", filepath.Join("/", "bar", "foo"))
|
||||
checkGuess(t, "github.com/spf13/hugo/../hugo", "", filepath.Join("github.com", "spf13", "hugo"))
|
||||
}
|
237
vendor/src/github.com/spf13/cobra/cobra/cmd/init.go
vendored
|
@ -14,20 +14,15 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(initCmd)
|
||||
}
|
||||
|
||||
// initialize Command
|
||||
var initCmd = &cobra.Command{
|
||||
Use: "init [name]",
|
||||
Aliases: []string{"initialize", "initialise", "create"},
|
||||
|
@ -45,79 +40,75 @@ and the appropriate structure for a Cobra-based CLI application.
|
|||
Init will not use an existing directory with contents.`,
|
||||
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
switch len(args) {
|
||||
case 0:
|
||||
inputPath = ""
|
||||
|
||||
case 1:
|
||||
inputPath = args[0]
|
||||
|
||||
default:
|
||||
er("init doesn't support more than 1 parameter")
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
guessProjectPath()
|
||||
initializePath(projectPath)
|
||||
|
||||
var project *Project
|
||||
if len(args) == 0 {
|
||||
project = NewProjectFromPath(wd)
|
||||
} else if len(args) == 1 {
|
||||
arg := args[0]
|
||||
if arg[0] == '.' {
|
||||
arg = filepath.Join(wd, arg)
|
||||
}
|
||||
if filepath.IsAbs(arg) {
|
||||
project = NewProjectFromPath(arg)
|
||||
} else {
|
||||
project = NewProject(arg)
|
||||
}
|
||||
} else {
|
||||
er("please provide only one argument")
|
||||
}
|
||||
|
||||
initializeProject(project)
|
||||
|
||||
fmt.Fprintln(cmd.OutOrStdout(), `Your Cobra application is ready at
|
||||
`+project.AbsPath()+`.
|
||||
|
||||
Give it a try by going there and running `+"`go run main.go`."+`
|
||||
Add commands to it by running `+"`cobra add [cmdname]`.")
|
||||
},
|
||||
}
|
||||
|
||||
func initializePath(path string) {
|
||||
b, err := exists(path)
|
||||
func initializeProject(project *Project) {
|
||||
if !exists(project.AbsPath()) { // If path doesn't yet exist, create it
|
||||
err := os.MkdirAll(project.AbsPath(), os.ModePerm)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
} else if !isEmpty(project.AbsPath()) { // If path exists and is not empty don't use it
|
||||
er("Cobra will not create a new project in a non empty directory: " + project.AbsPath())
|
||||
}
|
||||
|
||||
if !b { // If path doesn't yet exist, create it
|
||||
err := os.MkdirAll(path, os.ModePerm)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
} else { // If path exists and is not empty don't use it
|
||||
empty, err := exists(path)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
if !empty {
|
||||
er("Cobra will not create a new project in a non empty directory")
|
||||
}
|
||||
}
|
||||
// We have a directory and it's empty.. Time to initialize it.
|
||||
|
||||
createLicenseFile()
|
||||
createMainFile()
|
||||
createRootCmdFile()
|
||||
// We have a directory and it's empty. Time to initialize it.
|
||||
createLicenseFile(project.License(), project.AbsPath())
|
||||
createMainFile(project)
|
||||
createRootCmdFile(project)
|
||||
}
|
||||
|
||||
func createLicenseFile() {
|
||||
lic := getLicense()
|
||||
|
||||
// Don't bother writing a LICENSE file if there is no text.
|
||||
if lic.Text != "" {
|
||||
func createLicenseFile(license License, path string) {
|
||||
data := make(map[string]interface{})
|
||||
|
||||
// Try to remove the email address, if any
|
||||
data["copyright"] = strings.Split(copyrightLine(), " <")[0]
|
||||
|
||||
data["appName"] = projectName()
|
||||
data["copyright"] = copyrightLine()
|
||||
|
||||
// Generate license template from text and data.
|
||||
r, _ := templateToReader(lic.Text, data)
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
text, err := executeTemplate(license.Text, data)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
err := writeTemplateToFile(ProjectPath(), "LICENSE", buf.String(), data)
|
||||
_ = err
|
||||
// if err != nil {
|
||||
// er(err)
|
||||
// }
|
||||
// Write license text to LICENSE file.
|
||||
err = writeStringToFile(filepath.Join(path, "LICENSE"), text)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createMainFile() {
|
||||
lic := getLicense()
|
||||
func createMainFile(project *Project) {
|
||||
mainTemplate := `{{ comment .copyright }}
|
||||
{{if .license}}{{ comment .license }}{{end}}
|
||||
|
||||
template := `{{ comment .copyright }}
|
||||
{{if .license}}{{ comment .license }}
|
||||
{{end}}
|
||||
package main
|
||||
|
||||
import "{{ .importpath }}"
|
||||
|
@ -127,46 +118,41 @@ func main() {
|
|||
}
|
||||
`
|
||||
data := make(map[string]interface{})
|
||||
|
||||
data["copyright"] = copyrightLine()
|
||||
data["appName"] = projectName()
|
||||
data["license"] = project.License().Header
|
||||
data["importpath"] = path.Join(project.Name(), filepath.Base(project.CmdPath()))
|
||||
|
||||
// Generate license template from header and data.
|
||||
r, _ := templateToReader(lic.Header, data)
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
data["license"] = buf.String()
|
||||
mainScript, err := executeTemplate(mainTemplate, data)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
data["importpath"] = guessImportPath() + "/" + guessCmdDir()
|
||||
|
||||
err := writeTemplateToFile(ProjectPath(), "main.go", template, data)
|
||||
_ = err
|
||||
// if err != nil {
|
||||
// er(err)
|
||||
// }
|
||||
err = writeStringToFile(filepath.Join(project.AbsPath(), "main.go"), mainScript)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createRootCmdFile() {
|
||||
lic := getLicense()
|
||||
func createRootCmdFile(project *Project) {
|
||||
template := `{{comment .copyright}}
|
||||
{{if .license}}{{comment .license}}{{end}}
|
||||
|
||||
template := `{{ comment .copyright }}
|
||||
{{if .license}}{{ comment .license }}
|
||||
{{end}}
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
{{ if .viper }} "github.com/spf13/viper"
|
||||
{{ end }})
|
||||
{{if .viper}}
|
||||
var cfgFile string
|
||||
{{ end }}
|
||||
homedir "github.com/mitchellh/go-homedir"{{end}}
|
||||
"github.com/spf13/cobra"{{if .viper}}
|
||||
"github.com/spf13/viper"{{end}}
|
||||
){{if .viper}}
|
||||
|
||||
var cfgFile string{{end}}
|
||||
|
||||
// RootCmd represents the base command when called without any subcommands
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "{{ .appName }}",
|
||||
Use: "{{.appName}}",
|
||||
Short: "A brief description of your application",
|
||||
Long: ` + "`" + `A longer description that spans multiple lines and likely contains
|
||||
examples and usage of using your application. For example:
|
||||
|
@ -174,72 +160,75 @@ examples and usage of using your application. For example:
|
|||
Cobra is a CLI library for Go that empowers applications.
|
||||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.` + "`" + `,
|
||||
// Uncomment the following line if your bare application
|
||||
// has an action associated with it:
|
||||
// Run: func(cmd *cobra.Command, args []string) { },
|
||||
// Uncomment the following line if your bare application
|
||||
// has an action associated with it:
|
||||
// Run: func(cmd *cobra.Command, args []string) { },
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command sets flags appropriately.
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
if err := RootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(-1)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
{{ if .viper }} cobra.OnInitialize(initConfig)
|
||||
func init() { {{if .viper}}
|
||||
cobra.OnInitialize(initConfig)
|
||||
{{end}}
|
||||
// Here you will define your flags and configuration settings.
|
||||
// Cobra supports persistent flags, which, if defined here,
|
||||
// will be global for your application.{{ if .viper }}
|
||||
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)"){{ else }}
|
||||
// RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)"){{ end }}
|
||||
|
||||
{{ end }} // Here you will define your flags and configuration settings.
|
||||
// Cobra supports Persistent Flags, which, if defined here,
|
||||
// will be global for your application.
|
||||
{{ if .viper }}
|
||||
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)")
|
||||
{{ else }}
|
||||
// RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)")
|
||||
{{ end }} // Cobra also supports local flags, which will only run
|
||||
// Cobra also supports local flags, which will only run
|
||||
// when this action is called directly.
|
||||
RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
||||
}
|
||||
{{ if .viper }}
|
||||
}{{ if .viper }}
|
||||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
if cfgFile != "" { // enable ability to specify config file via flag
|
||||
if cfgFile != "" {
|
||||
// Use config file from the flag.
|
||||
viper.SetConfigFile(cfgFile)
|
||||
} else {
|
||||
// Find home directory.
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Search config in home directory with name ".{{ .appName }}" (without extension).
|
||||
viper.AddConfigPath(home)
|
||||
viper.SetConfigName(".{{ .appName }}")
|
||||
}
|
||||
|
||||
viper.SetConfigName(".{{ .appName }}") // name of config file (without extension)
|
||||
viper.AddConfigPath(os.Getenv("HOME")) // adding home directory as first search path
|
||||
viper.AutomaticEnv() // read in environment variables that match
|
||||
|
||||
// If a config file is found, read it in.
|
||||
if err := viper.ReadInConfig(); err == nil {
|
||||
fmt.Println("Using config file:", viper.ConfigFileUsed())
|
||||
}
|
||||
}
|
||||
{{ end }}`
|
||||
}{{ end }}
|
||||
`
|
||||
|
||||
data := make(map[string]interface{})
|
||||
|
||||
data["copyright"] = copyrightLine()
|
||||
data["appName"] = projectName()
|
||||
|
||||
// Generate license template from header and data.
|
||||
r, _ := templateToReader(lic.Header, data)
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
data["license"] = buf.String()
|
||||
|
||||
data["viper"] = viper.GetBool("useViper")
|
||||
data["license"] = project.License().Header
|
||||
data["appName"] = path.Base(project.Name())
|
||||
|
||||
err := writeTemplateToFile(ProjectPath()+string(os.PathSeparator)+guessCmdDir(), "root.go", template, data)
|
||||
rootCmdScript, err := executeTemplate(template, data)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
err = writeStringToFile(filepath.Join(project.CmdPath(), "root.go"), rootCmdScript)
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
fmt.Println("Your Cobra application is ready at")
|
||||
fmt.Println(ProjectPath())
|
||||
fmt.Println("Give it a try by going there and running `go run main.go`")
|
||||
fmt.Println("Add commands to it by running `cobra add [cmdname]`")
|
||||
}
|
||||
|
|
74
vendor/src/github.com/spf13/cobra/cobra/cmd/init_test.go
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGoldenInitCmd initializes the project "github.com/spf13/testproject"
|
||||
// in GOPATH and compares the content of files in initialized project with
|
||||
// appropriate golden files ("testdata/*.golden").
|
||||
// Use -update to update existing golden files.
|
||||
func TestGoldenInitCmd(t *testing.T) {
|
||||
projectName := "github.com/spf13/testproject"
|
||||
project := NewProject(projectName)
|
||||
defer os.RemoveAll(project.AbsPath())
|
||||
|
||||
os.Args = []string{"cobra", "init", projectName}
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
t.Fatal("Error by execution:", err)
|
||||
}
|
||||
|
||||
expectedFiles := []string{".", "cmd", "LICENSE", "main.go", "cmd/root.go"}
|
||||
gotFiles := []string{}
|
||||
|
||||
// Check project file hierarchy and compare the content of every single file
|
||||
// with appropriate golden file.
|
||||
err := filepath.Walk(project.AbsPath(), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make path relative to project.AbsPath().
|
||||
// E.g. path = "/home/user/go/src/github.com/spf13/testproject/cmd/root.go"
|
||||
// then it returns just "cmd/root.go".
|
||||
relPath, err := filepath.Rel(project.AbsPath(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
relPath = filepath.ToSlash(relPath)
|
||||
gotFiles = append(gotFiles, relPath)
|
||||
goldenPath := filepath.Join("testdata", filepath.Base(path)+".golden")
|
||||
|
||||
switch relPath {
|
||||
// Known directories.
|
||||
case ".", "cmd":
|
||||
return nil
|
||||
// Known files.
|
||||
case "LICENSE", "main.go", "cmd/root.go":
|
||||
if *update {
|
||||
got, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(goldenPath, got, 0644); err != nil {
|
||||
t.Fatal("Error while updating file:", err)
|
||||
}
|
||||
}
|
||||
return compareFiles(path, goldenPath)
|
||||
}
|
||||
// Unknown file.
|
||||
return errors.New("unknown file: " + path)
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check that no expected files are missing.
|
||||
if err := checkLackFiles(expectedFiles, gotFiles); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
684
vendor/src/github.com/spf13/cobra/cobra/cmd/license_agpl.go
vendored
Normal file
|
@ -0,0 +1,684 @@
|
|||
package cmd
|
||||
|
||||
func initAgpl() {
|
||||
Licenses["agpl"] = License{
|
||||
Name: "GNU Affero General Public License",
|
||||
PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"},
|
||||
Header: `{{.copyright}}
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.`,
|
||||
Text: ` GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
`,
|
||||
}
|
||||
}
|
|
@@ -19,18 +19,17 @@ func initApache2() {
|
|||
Licenses["apache"] = License{
|
||||
Name: "Apache 2.0",
|
||||
PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"},
|
||||
Header: `
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.`,
|
||||
Header: `Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.`,
|
||||
Text: `
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
|
|
|
@@ -18,7 +18,8 @@ package cmd
|
|||
func initBsdClause2() {
|
||||
Licenses["freebsd"] = License{
|
||||
Name: "Simplified BSD License",
|
||||
PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2 clause bsd"},
|
||||
PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd",
|
||||
"2 clause bsd", "simplified bsd license"},
|
||||
Header: `
|
||||
All rights reserved.
|
||||
|
||||
|
|
|
@@ -18,7 +18,7 @@ package cmd
|
|||
func initBsdClause3() {
|
||||
Licenses["bsd"] = License{
|
||||
Name: "NewBSD",
|
||||
PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd"},
|
||||
PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"},
|
||||
Header: `
|
||||
All rights reserved.
|
||||
|
||||
|
|
|
@@ -18,21 +18,21 @@ package cmd
|
|||
func initGpl2() {
|
||||
Licenses["gpl2"] = License{
|
||||
Name: "GNU General Public License 2.0",
|
||||
PossibleMatches: []string{"gpl2", "gnu gpl2"},
|
||||
Header: `{{ .copyright }}
|
||||
PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"},
|
||||
Header: `{{.copyright}}
|
||||
|
||||
{{ .appName }} is free software; you can redistribute it and/or
|
||||
This program is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation; either version 2
|
||||
of the License, or (at your option) any later version.
|
||||
|
||||
{{ .appName }} is distributed in the hope that it will be useful,
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with {{ .appName }}. If not, see <http://www.gnu.org/licenses/>.`,
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.`,
|
||||
Text: ` GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
|
@@ -313,6 +313,65 @@ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
|||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type 'show c' for details.
|
||||
|
||||
The hypothetical commands 'show w' and 'show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than 'show w' and 'show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
'Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License.
|
||||
`,
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -18,23 +18,21 @@ package cmd
|
|||
func initGpl3() {
|
||||
Licenses["gpl3"] = License{
|
||||
Name: "GNU General Public License 3.0",
|
||||
PossibleMatches: []string{"gpl3", "gpl", "gnu gpl3", "gnu gpl"},
|
||||
Header: `
|
||||
This file is part of {{ .appName }}.
|
||||
PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"},
|
||||
Header: `{{.copyright}}
|
||||
|
||||
{{ .appName }} is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
{{ .appName }} is distributed in the hope that it will be useful,
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with {{ .appName }}. If not, see <http://www.gnu.org/licenses/>.
|
||||
`,
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.`,
|
||||
Text: ` GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
|
@@ -656,6 +654,59 @@ Program, unless a warranty or assumption of liability accompanies a
|
|||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type 'show c' for details.
|
||||
|
||||
The hypothetical commands 'show w' and 'show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
`,
|
||||
}
|
||||
}
|
||||
|
|
187
vendor/src/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
vendored
Normal file
|
@@ -0,0 +1,187 @@
|
|||
package cmd
|
||||
|
||||
func initLgpl() {
|
||||
Licenses["lgpl"] = License{
|
||||
Name: "GNU Lesser General Public License",
|
||||
PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"},
|
||||
Header: `{{.copyright}}
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.`,
|
||||
Text: ` GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.`,
|
||||
}
|
||||
}
|
|
@@ -15,13 +15,18 @@
|
|||
|
||||
package cmd
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
//Licenses contains all possible licenses a user can chose from
|
||||
var Licenses map[string]License
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
//License represents a software license agreement, containing the Name of
|
||||
// the license, its possible matches (on the command line as given to cobra)
|
||||
// Licenses contains all possible licenses a user can choose from.
|
||||
var Licenses = make(map[string]License)
|
||||
|
||||
// License represents a software license agreement, containing the Name of
|
||||
// the license, its possible matches (on the command line as given to cobra),
|
||||
// the header to be used with each file on the file's creating, and the text
|
||||
// of the license
|
||||
type License struct {
|
||||
|
@@ -31,45 +36,79 @@ type License struct {
|
|||
Header string // License header for source files
|
||||
}
|
||||
|
||||
// given a license name (in), try to match the license indicated
|
||||
func matchLicense(in string) string {
|
||||
func init() {
|
||||
// Allows a user to not use a license.
|
||||
Licenses["none"] = License{"None", []string{"none", "false"}, "", ""}
|
||||
|
||||
initApache2()
|
||||
initMit()
|
||||
initBsdClause3()
|
||||
initBsdClause2()
|
||||
initGpl2()
|
||||
initGpl3()
|
||||
initLgpl()
|
||||
initAgpl()
|
||||
}
|
||||
|
||||
// getLicense returns license specified by user in flag or in config.
|
||||
// If user didn't specify the license, it returns Apache License 2.0.
|
||||
//
|
||||
// TODO: Inspect project for existing license
|
||||
func getLicense() License {
|
||||
// If explicitly flagged, use that.
|
||||
if userLicense != "" {
|
||||
return findLicense(userLicense)
|
||||
}
|
||||
|
||||
// If user wants to have custom license, use that.
|
||||
if viper.IsSet("license.header") || viper.IsSet("license.text") {
|
||||
return License{Header: viper.GetString("license.header"),
|
||||
Text: viper.GetString("license.text")}
|
||||
}
|
||||
|
||||
// If user wants to have built-in license, use that.
|
||||
if viper.IsSet("license") {
|
||||
return findLicense(viper.GetString("license"))
|
||||
}
|
||||
|
||||
// If user didn't set any license, use Apache 2.0 by default.
|
||||
return Licenses["apache"]
|
||||
}
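
// exampleCustomLicense is an illustrative sketch, not part of the upstream
// file: it shows how a custom license could be supplied through viper config
// keys, which getLicense picks up before falling back to a built-in license.
// The header and text values below are assumptions for the example.
func exampleCustomLicense() License {
	viper.Set("license.header", "Copyright {{ .copyright }}. All rights reserved.")
	viper.Set("license.text", "Full custom license text goes here.")
	return getLicense() // returns the License built from the two config keys
}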
|
||||
|
||||
func copyrightLine() string {
|
||||
author := viper.GetString("author")
|
||||
year := time.Now().Format("2006")
|
||||
|
||||
return "Copyright © " + year + " " + author
|
||||
}
|
||||
|
||||
// findLicense looks up a License object among the built-in licenses.
// If no matching license is found, the app is terminated and an
// error is printed.
|
||||
func findLicense(name string) License {
|
||||
found := matchLicense(name)
|
||||
if found == "" {
|
||||
er("unknown license: " + name)
|
||||
}
|
||||
return Licenses[found]
|
||||
}
|
||||
|
||||
// matchLicense compares the given license name
// to the PossibleMatches of all built-in licenses.
// It returns a blank string if name is blank or no
// appropriate match is found.
|
||||
func matchLicense(name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
for key, lic := range Licenses {
|
||||
for _, match := range lic.PossibleMatches {
|
||||
if strings.EqualFold(in, match) {
|
||||
if strings.EqualFold(name, match) {
|
||||
return key
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
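
// exampleLicenseLookup is an illustrative sketch, not part of the upstream
// file: matching is case-insensitive against PossibleMatches, so "GPLv3"
// resolves to the "gpl3" key, and findLicense returns that built-in License.
func exampleLicenseLookup() (string, License) {
	key := matchLicense("GPLv3") // "gpl3"
	lic := findLicense("GPLv3")  // Licenses["gpl3"]
	return key, lic
}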
|
||||
|
||||
func init() {
|
||||
Licenses = make(map[string]License)
|
||||
|
||||
// Allows a user to not use a license.
|
||||
Licenses["none"] = License{"None", []string{"none", "false"}, "", ""}
|
||||
|
||||
// Allows a user to use config for a custom license.
|
||||
Licenses["custom"] = License{"Custom", []string{}, "", ""}
|
||||
|
||||
initApache2()
|
||||
|
||||
initMit()
|
||||
|
||||
initBsdClause3()
|
||||
|
||||
initBsdClause2()
|
||||
|
||||
initGpl2()
|
||||
|
||||
initGpl3()
|
||||
|
||||
// Licenses["apache20"] = License{
|
||||
// Name: "Apache 2.0",
|
||||
// PossibleMatches: []string{"apache", "apache20", ""},
|
||||
// Header: `
|
||||
// `,
|
||||
// Text: `
|
||||
// `,
|
||||
// }
|
||||
}
|
||||
|
|
195
vendor/src/github.com/spf13/cobra/cobra/cmd/project.go
vendored
Normal file
|
@@ -0,0 +1,195 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Project contains name, license and paths to projects.
|
||||
type Project struct {
|
||||
absPath string
|
||||
cmdPath string
|
||||
srcPath string
|
||||
license License
|
||||
name string
|
||||
}
|
||||
|
||||
// NewProject returns Project with specified project name.
|
||||
// If projectName is blank string, it returns nil.
|
||||
func NewProject(projectName string) *Project {
|
||||
if projectName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
p := new(Project)
|
||||
p.name = projectName
|
||||
|
||||
// 1. Find already created protect.
|
||||
p.absPath = findPackage(projectName)
|
||||
|
||||
// 2. If no project was created with this path, and the user is in GOPATH,
|
||||
// then use GOPATH/src/projectName.
|
||||
if p.absPath == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
for _, srcPath := range srcPaths {
|
||||
goPath := filepath.Dir(srcPath)
|
||||
if filepathHasPrefix(wd, goPath) {
|
||||
p.absPath = filepath.Join(srcPath, projectName)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. If user is not in GOPATH, then use (first GOPATH)/src/projectName.
|
||||
if p.absPath == "" {
|
||||
p.absPath = filepath.Join(srcPaths[0], projectName)
|
||||
}
|
||||
|
||||
return p
|
||||
}
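
// exampleNewProject is an illustrative sketch, not part of the upstream
// file: given only an import path, NewProject resolves an absolute path
// under one of the GOPATH src directories (or falls back to the first one).
// The import path below is a hypothetical example.
func exampleNewProject() string {
	p := NewProject("github.com/spf13/hypothetical-app")
	return p.AbsPath() // e.g. "/home/user/go/src/github.com/spf13/hypothetical-app"
}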
|
||||
|
||||
// findPackage returns the full path to an existing Go package in GOPATHs.
// findPackage returns "" if it can't find the path.
// If packageName is "", findPackage returns "".
|
||||
func findPackage(packageName string) string {
|
||||
if packageName == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, srcPath := range srcPaths {
|
||||
packagePath := filepath.Join(srcPath, packageName)
|
||||
if exists(packagePath) {
|
||||
return packagePath
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// NewProjectFromPath returns Project with specified absolute path to
|
||||
// package.
|
||||
// If absPath is blank string or if absPath is not actually absolute,
|
||||
// it returns nil.
|
||||
func NewProjectFromPath(absPath string) *Project {
|
||||
if absPath == "" || !filepath.IsAbs(absPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
p := new(Project)
|
||||
p.absPath = absPath
|
||||
p.absPath = strings.TrimSuffix(p.absPath, findCmdDir(p.absPath))
|
||||
p.name = filepath.ToSlash(trimSrcPath(p.absPath, p.SrcPath()))
|
||||
return p
|
||||
}
|
||||
|
||||
// trimSrcPath trims srcPath from the beginning of absPath.
|
||||
func trimSrcPath(absPath, srcPath string) string {
|
||||
relPath, err := filepath.Rel(srcPath, absPath)
|
||||
if err != nil {
|
||||
er("Cobra supports project only within $GOPATH: " + err.Error())
|
||||
}
|
||||
return relPath
|
||||
}
|
||||
|
||||
// License returns the License object of project.
|
||||
func (p *Project) License() License {
|
||||
if p.license.Text == "" && p.license.Name != "None" {
|
||||
p.license = getLicense()
|
||||
}
|
||||
|
||||
return p.license
|
||||
}
|
||||
|
||||
// Name returns the name of project, e.g. "github.com/spf13/cobra"
|
||||
func (p Project) Name() string {
|
||||
return p.name
|
||||
}
|
||||
|
||||
// CmdPath returns absolute path to directory, where all commands are located.
|
||||
//
|
||||
// CmdPath returns blank string, only if p.AbsPath() is a blank string.
|
||||
func (p *Project) CmdPath() string {
|
||||
if p.absPath == "" {
|
||||
return ""
|
||||
}
|
||||
if p.cmdPath == "" {
|
||||
p.cmdPath = filepath.Join(p.absPath, findCmdDir(p.absPath))
|
||||
}
|
||||
return p.cmdPath
|
||||
}
|
||||
|
||||
// findCmdDir checks if the base of absPath is a cmd dir and returns it,
// or otherwise looks for an existing cmd dir in absPath.
// If the cmd dir doesn't exist, is empty, or cannot be found,
// it returns "cmd".
|
||||
func findCmdDir(absPath string) string {
|
||||
if !exists(absPath) || isEmpty(absPath) {
|
||||
return "cmd"
|
||||
}
|
||||
|
||||
if isCmdDir(absPath) {
|
||||
return filepath.Base(absPath)
|
||||
}
|
||||
|
||||
files, _ := filepath.Glob(filepath.Join(absPath, "c*"))
|
||||
for _, file := range files {
|
||||
if isCmdDir(file) {
|
||||
return filepath.Base(file)
|
||||
}
|
||||
}
|
||||
|
||||
return "cmd"
|
||||
}
|
||||
|
||||
// isCmdDir checks if the base of name is one of cmdDirs.
|
||||
func isCmdDir(name string) bool {
|
||||
name = filepath.Base(name)
|
||||
for _, cmdDir := range cmdDirs {
|
||||
if name == cmdDir {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// AbsPath returns absolute path of project.
|
||||
func (p Project) AbsPath() string {
|
||||
return p.absPath
|
||||
}
|
||||
|
||||
// SrcPath returns absolute path to $GOPATH/src where project is located.
|
||||
func (p *Project) SrcPath() string {
|
||||
if p.srcPath != "" {
|
||||
return p.srcPath
|
||||
}
|
||||
if p.absPath == "" {
|
||||
p.srcPath = srcPaths[0]
|
||||
return p.srcPath
|
||||
}
|
||||
|
||||
for _, srcPath := range srcPaths {
|
||||
if filepathHasPrefix(p.absPath, srcPath) {
|
||||
p.srcPath = srcPath
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return p.srcPath
|
||||
}
|
||||
|
||||
func filepathHasPrefix(path string, prefix string) bool {
|
||||
if len(path) <= len(prefix) {
|
||||
return false
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
// Paths in windows are case-insensitive.
|
||||
return strings.EqualFold(path[0:len(prefix)], prefix)
|
||||
}
|
||||
return path[0:len(prefix)] == prefix
|
||||
|
||||
}
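
// examplePrefixCheck is an illustrative sketch, not part of the upstream
// file: filepathHasPrefix is how the project code decides whether a working
// directory lies under a GOPATH src path; on Windows the comparison is
// case-insensitive. The paths below are assumptions for the example.
func examplePrefixCheck() bool {
	wd := "/home/user/go/src/github.com/spf13/cobra"
	return filepathHasPrefix(wd, "/home/user/go/src") // true: wd sits below the src path
}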
|
24
vendor/src/github.com/spf13/cobra/cobra/cmd/project_test.go
vendored
Normal file
|
@@ -0,0 +1,24 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindExistingPackage(t *testing.T) {
|
||||
path := findPackage("github.com/spf13/cobra")
|
||||
if path == "" {
|
||||
t.Fatal("findPackage didn't find the existing package")
|
||||
}
|
||||
if !hasGoPathPrefix(path) {
|
||||
t.Fatalf("%q is not in GOPATH, but must be", path)
|
||||
}
|
||||
}
|
||||
|
||||
func hasGoPathPrefix(path string) bool {
|
||||
for _, srcPath := range srcPaths {
|
||||
if filepathHasPrefix(path, srcPath) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@@ -15,57 +15,64 @@ package cmd
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var cfgFile string
|
||||
var userLicense string
|
||||
var (
|
||||
// Used for flags.
|
||||
cfgFile, userLicense string
|
||||
|
||||
// RootCmd represents the base command when called without any subcommands
|
||||
var RootCmd = &cobra.Command{
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "cobra",
|
||||
Short: "A generator for Cobra based Applications",
|
||||
Long: `Cobra is a CLI library for Go that empowers applications.
|
||||
This application is a tool to generate the needed files
|
||||
to quickly create a Cobra application.`,
|
||||
}
|
||||
|
||||
//Execute adds all child commands to the root command sets flags appropriately.
|
||||
func Execute() {
|
||||
if err := RootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
)
|
||||
|
||||
// Execute executes the root command.
|
||||
func Execute() {
|
||||
rootCmd.Execute()
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
|
||||
RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory, e.g. github.com/spf13/")
|
||||
RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
|
||||
RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `license` in config)")
|
||||
RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
|
||||
viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
|
||||
viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase"))
|
||||
viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper"))
|
||||
initViper()
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
|
||||
rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
|
||||
rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
|
||||
rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
|
||||
viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
|
||||
viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
|
||||
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
|
||||
viper.SetDefault("license", "apache")
|
||||
|
||||
rootCmd.AddCommand(addCmd)
|
||||
rootCmd.AddCommand(initCmd)
|
||||
}
|
||||
|
||||
// Read in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
if cfgFile != "" { // enable ability to specify config file via flag
|
||||
func initViper() {
|
||||
if cfgFile != "" {
|
||||
// Use config file from the flag.
|
||||
viper.SetConfigFile(cfgFile)
|
||||
} else {
|
||||
// Find home directory.
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
er(err)
|
||||
}
|
||||
|
||||
viper.SetConfigName(".cobra") // name of config file (without extension)
|
||||
viper.AddConfigPath(os.Getenv("HOME")) // adding home directory as first search path
|
||||
viper.AutomaticEnv() // read in environment variables that match
|
||||
// Search config in home directory with name ".cobra" (without extension).
|
||||
viper.AddConfigPath(home)
|
||||
viper.SetConfigName(".cobra")
|
||||
}
|
||||
|
||||
viper.AutomaticEnv()
|
||||
|
||||
// If a config file is found, read it in.
|
||||
if err := viper.ReadInConfig(); err == nil {
|
||||
fmt.Println("Using config file:", viper.ConfigFileUsed())
|
||||
}
|
||||
|
|
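The reworked initViper above follows a common Viper bootstrap pattern: an explicitly supplied config file wins, otherwise the user's home directory is searched for a named config. A hedged, self-contained sketch of the same pattern, assuming the illustrative names loadConfig and ".example" (neither appears in this diff):

package main

import (
	"fmt"
	"os"

	homedir "github.com/mitchellh/go-homedir"
	"github.com/spf13/viper"
)

// loadConfig prefers an explicit file; otherwise it searches the home
// directory for ".example.{yaml,json,toml,...}" and reads whatever it finds.
func loadConfig(cfgFile string) {
	if cfgFile != "" {
		viper.SetConfigFile(cfgFile)
	} else {
		home, err := homedir.Dir()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		viper.AddConfigPath(home)
		viper.SetConfigName(".example")
	}

	viper.AutomaticEnv()

	// A missing config file is not treated as an error; flags and env still apply.
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}

func main() {
	loadConfig("") // fall back to the home-directory search
}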
202
vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden
vendored
Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
20
vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/main.go.golden
vendored
Normal file

@@ -0,0 +1,20 @@
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import "github.com/spf13/testproject/cmd"

func main() {
	cmd.Execute()
}
88
vendor/src/github.com/spf13/cobra/cobra/cmd/testdata/root.go.golden
vendored
Normal file

@@ -0,0 +1,88 @@
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"fmt"
	"os"

	homedir "github.com/mitchellh/go-homedir"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var cfgFile string

// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
	Use:   "testproject",
	Short: "A brief description of your application",
	Long: `A longer description that spans multiple lines and likely contains
examples and usage of using your application. For example:

Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	// Uncomment the following line if your bare application
	// has an action associated with it:
	// Run: func(cmd *cobra.Command, args []string) { },
}

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
	if err := RootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

func init() {
	cobra.OnInitialize(initConfig)

	// Here you will define your flags and configuration settings.
	// Cobra supports persistent flags, which, if defined here,
	// will be global for your application.
	RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.testproject.yaml)")

	// Cobra also supports local flags, which will only run
	// when this action is called directly.
	RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

// initConfig reads in config file and ENV variables if set.
func initConfig() {
	if cfgFile != "" {
		// Use config file from the flag.
		viper.SetConfigFile(cfgFile)
	} else {
		// Find home directory.
		home, err := homedir.Dir()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		// Search config in home directory with name ".testproject" (without extension).
		viper.AddConfigPath(home)
		viper.SetConfigName(".testproject")
	}

	viper.AutomaticEnv() // read in environment variables that match

	// If a config file is found, read it in.
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
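The root.go.golden scaffold above is what the cobra generator emits for a new project; subcommands are then attached to the root command and dispatched through Execute. A hedged, self-contained sketch of that wiring, using illustrative names (exampleapp, serveCmd) that are not produced by this commit:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// rootCmd stands in for the generated root command.
var rootCmd = &cobra.Command{
	Use:   "exampleapp",
	Short: "A scaffold-style root command",
}

// serveCmd is a typical subcommand a user would add to the scaffold.
var serveCmd = &cobra.Command{
	Use:   "serve",
	Short: "Run the example server",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("serve called")
	},
}

func main() {
	rootCmd.AddCommand(serveCmd)
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}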
Some files were not shown because too many files have changed in this diff.