diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 72f1ca4bc..d40401810 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -81,6 +81,14 @@ { "ImportPath": "github.com/stevvooe/resumable", "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" + }, + { + "ImportPath": "github.com/ncw/swift", + "Rev": "22c8fa9fb5ba145b4d4e2cebb027e84b1a7b1296" }, { "ImportPath": "github.com/yvasiyarov/go-metrics", diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..7f3fe9a96 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + +script: + - go test diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..659d6885f --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. 
This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..aa91f76ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,151 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = reflect.ValueOf(data).Type() + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. 
+func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 000000000..53289afcf --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,229 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func 
TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestStringToTimeDurationHookFunc(t *testing.T) { + f := StringToTimeDurationHookFunc() + + strType := reflect.TypeOf("") + timeType := reflect.TypeOf(time.Duration(5)) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {strType, timeType, "5s", 5 * time.Second, false}, + {strType, timeType, "5", time.Duration(0), true}, + {strType, strType, "5", "5", false}, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestWeaklyTypedHook(t *testing.T) { + var f DecodeHookFunc = WeaklyTypedHook + + boolType := reflect.TypeOf(true) + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + // TO STRING + { + boolType, + strType, + false, + "0", + false, + }, + + { + boolType, + strType, + true, + "1", + false, + }, + + { + reflect.TypeOf(float32(1)), + strType, + float32(7), + "7", + false, + }, + + { + reflect.TypeOf(int(1)), + strType, + int(7), + "7", + false, + }, + + { + sliceType, + strType, + []uint8("foo"), + "foo", + false, + }, + + { + reflect.TypeOf(uint(1)), + strType, + uint(7), + "7", + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. 
+func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..d3cb4e8f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,746 @@ +// The mapstructure package exposes functionality to convert an +// abitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. 
Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. 
+ val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. + var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == 
reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() 
+ valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // Accept empty array/slice instead of an empty map in weakly typed mode + if d.config.WeaklyTypedInput && + (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && + dataVal.Len() == 0 { + val.Set(valMap) + return nil + } else { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. 
+ valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append(structs, val.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
+ for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 000000000..b50ac36e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,243 @@ +package mapstructure + +import ( + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func 
Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 000000000..7054f1ac9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,47 @@ +package mapstructure + +import "testing" + +// GH-1 +func TestDecode_NilValue(t *testing.T) { + input := map[string]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// GH-10 +func TestDecode_mapInterfaceInterface(t *testing.T) { + input := map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file mode 100644 index 000000000..f17c214a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
+ input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} +} + +func ExampleDecode_errors() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": 123, + "age": "bad value", + "emails": []int{1, 2, 3}, + } + + var result Person + err := Decode(input, &result) + if err == nil { + panic("should have an error") + } + + fmt.Println(err.Error()) + // Output: + // 5 error(s) decoding: + // + // * 'Age' expected type 'int', got unconvertible type 'string' + // * 'Emails[0]' expected type 'string', got unconvertible type 'int' + // * 'Emails[1]' expected type 'string', got unconvertible type 'int' + // * 'Emails[2]' expected type 'string', got unconvertible type 'int' + // * 'Name' expected type 'string', got unconvertible type 'int' +} + +func ExampleDecode_metadata() { + type Person struct { + Name string + Age int + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + // For metadata, we make a more advanced DecoderConfig so we can + // more finely configure the decoder that is used. In this case, we + // just tell the decoder we want to track metadata. + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + if err := decoder.Decode(input); err != nil { + panic(err) + } + + fmt.Printf("Unused keys: %#v", md.Unused) + // Output: + // Unused keys: []string{"email"} +} + +func ExampleDecode_weaklyTypedInput() { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. + input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + err = decoder.Decode(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} +} + +func ExampleDecode_tags() { + // Note that the mapstructure tags defined in the struct type + // can indicate which fields the values are mapped to. 
+ type Person struct { + Name string `mapstructure:"person_name"` + Age int `mapstructure:"person_age"` + } + + input := map[string]interface{}{ + "person_name": "Mitchell", + "person_age": 91, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91} +} + +func ExampleDecode_embeddedStruct() { + // Squashing multiple embedded structs is allowed using the squash tag. + // This is demonstrated by creating a composite struct of multiple types + // and decoding into it. In this case, a person can carry with it both + // a Family and a Location, as well as their own FirstName. + type Family struct { + LastName string + } + type Location struct { + City string + } + type Person struct { + Family `mapstructure:",squash"` + Location `mapstructure:",squash"` + FirstName string + } + + input := map[string]interface{}{ + "FirstName": "Mitchell", + "LastName": "Hashimoto", + "City": "San Francisco", + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) + // Output: + // Mitchell Hashimoto, San Francisco +} diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go new file mode 100644 index 000000000..e05dcc66c --- /dev/null +++ b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -0,0 +1,954 @@ +package mapstructure + +import ( + "reflect" + "sort" + "testing" +) + +type Basic struct { + Vstring string + Vint int + Vuint uint + Vbool bool + Vfloat float64 + Vextra string + vsilent bool + Vdata interface{} +} + +type Embedded struct { + Basic + Vunique string +} + +type EmbeddedPointer struct { + *Basic + Vunique string +} + +type EmbeddedSquash struct { + Basic `mapstructure:",squash"` + Vunique string +} + +type Map struct { + Vfoo string + Vother map[string]string +} + +type MapOfStruct struct { + Value map[string]Basic +} + +type Nested struct { + Vfoo string + Vbar Basic +} + +type NestedPointer struct { + Vfoo string + Vbar *Basic +} + +type Slice struct { + Vfoo string + Vbar []string +} + +type SliceOfStruct struct { + Value []Basic +} + +type Tagged struct { + Extra string `mapstructure:"bar,what,what"` + Value string `mapstructure:"foo"` +} + +type TypeConversionResult struct { + IntToFloat float32 + IntToUint uint + IntToBool bool + IntToString string + UintToInt int + UintToFloat float32 + UintToBool bool + UintToString string + BoolToInt int + BoolToUint uint + BoolToFloat float32 + BoolToString string + FloatToInt int + FloatToUint uint + FloatToBool bool + FloatToString string + SliceUint8ToString string + StringToInt int + StringToUint uint + StringToBool bool + StringToFloat float32 + SliceToMap map[string]interface{} + MapToSlice []interface{} +} + +func TestBasicTypes(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if result.Vuint != 42 { + 
t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result EmbeddedPointer + err := Decode(input, &result) + if err == nil { + t.Fatal("should get error") + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_DecodeHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { + if from == reflect.String && to != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_DecodeHookType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { + if from.Kind() == reflect.String && + to.Kind() != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_Nil(t *testing.T) { + t.Parallel() + + var input interface{} = nil + result 
:= Basic{ + Vstring: "foo", + } + + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result.Vstring != "foo" { + t.Fatalf("bad: %#v", result.Vstring) + } +} + +func TestDecode_NonStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + + var result map[string]string + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["foo"] != "bar" { + t.Fatal("foo is not bar") + } +} + +func TestDecode_StructMatch(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vbar": Basic{ + Vstring: "foo", + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("bad: %#v", result) + } +} + +func TestDecode_TypeConversion(t *testing.T) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + StringToFloat: 42.42, + SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + 
"vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if 
result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestInvalidType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": 42, + } + + var result Basic + err := Decode(input, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok := err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegIntUint := map[string]interface{}{ + "vuint": -42, + } + + err = Decode(inputNegIntUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegFloatUint := map[string]interface{}{ + "vuint": -42.0, + } + + err = Decode(inputNegFloatUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := 
[]string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vstring", "Vunique"} + + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestNonPtrValue(t *testing.T) { + t.Parallel() + + err := Decode(map[string]interface{}{}, Basic{}) + if err == nil { + t.Fatal("error should exist") + } + + if err.Error() != "result must be a pointer" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestTagged(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + err := Decode(input, &result) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if result.Value != "bar" { + t.Errorf("value should be 'bar', got: %#v", result.Value) + } + + if result.Extra != "value" { + t.Errorf("extra should be 'value', got: %#v", result.Extra) + } +} + +func TestWeakDecode(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + } + + var result struct { + Foo int + Bar string + } + + if err := WeakDecode(input, &result); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } +} + +func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { + var result Slice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == nil { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/.gitignore b/Godeps/_workspace/src/github.com/ncw/swift/.gitignore new file mode 100644 index 000000000..5cdbab794 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/.gitignore @@ -0,0 +1,4 @@ +*~ +*.pyc +test-env* +junk/ \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml b/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml new file mode 100644 index 000000000..391b1d191 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - 1.1.2 + - 1.2.2 + - 1.3 + - tip + +script: + - go test diff --git a/Godeps/_workspace/src/github.com/ncw/swift/COPYING b/Godeps/_workspace/src/github.com/ncw/swift/COPYING new file mode 100644 index 000000000..8c27c67fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/COPYING @@ -0,0 +1,20 @@ 
+Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/Godeps/_workspace/src/github.com/ncw/swift/README.md b/Godeps/_workspace/src/github.com/ncw/swift/README.md new file mode 100644 index 000000000..993560eab --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/README.md @@ -0,0 +1,128 @@ +Swift +===== + +This package provides an easy to use library for interfacing with +Swift / Openstack Object Storage / Rackspace cloud files from the Go +Language + +See here for package docs + + http://godoc.org/github.com/ncw/swift + +[![Build Status](https://travis-ci.org/ncw/swift.png)](https://travis-ci.org/ncw/swift) + +Install +------- + +Use go to install the library + + go get github.com/ncw/swift + +Usage +----- + +See here for full package docs + +- http://godoc.org/github.com/ncw/swift + +Here is a short example from the docs + + import "github.com/ncw/swift" + + // Create a connection + c := swift.Connection{ + UserName: "user", + ApiKey: "key", + AuthUrl: "auth_url", + Domain: "domain", // Name of the domain (v3 auth only) + Tenant: "tenant", // Name of the tenant (v2 auth only) + } + // Authenticate + err := c.Authenticate() + if err != nil { + panic(err) + } + // List all the containers + containers, err := c.ContainerNames(nil) + fmt.Println(containers) + // etc... + +Additions +--------- + +The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface. + +Testing +------- + +To run the tests you can either use an embedded fake Swift server +either use a real Openstack Swift server or a Rackspace Cloud files account. 
+ +When using a real Swift server, you need to set these environment variables +before running the tests + + export SWIFT_API_USER='user' + export SWIFT_API_KEY='key' + export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' + +And optionally these if using v2 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + +And optionally these if using v3 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + export SWIFT_API_DOMAIN_ID='domain id' + export SWIFT_API_DOMAIN='domain name' + +And optionally this if you want to skip server certificate validation + + export SWIFT_AUTH_INSECURE=1 + +And optionally this to configure the connect channel timeout, in seconds + + export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 + +And optionally this to configure the data channel timeout, in seconds + + export SWIFT_DATA_CHANNEL_TIMEOUT=60 + +Then run the tests with `go test` + +License +------- + +This is free software under the terms of MIT license (check COPYING file +included in this package). + +Contact and support +------------------- + +The project website is at: + +- https://github.com/ncw/swift + +There you can file bug reports, ask for help or contribute patches. + +Authors +------- + +- Nick Craig-Wood + +Contributors +------------ + +- Brian "bojo" Jones +- Janika Liiv +- Yamamoto, Hirotaka +- Stephen +- platformpurple +- Paul Querna +- Livio Soares +- thesyncim +- lsowen +- Sylvain Baubeau +- Chris Kastorff +- Dai HaoJun diff --git a/Godeps/_workspace/src/github.com/ncw/swift/auth.go b/Godeps/_workspace/src/github.com/ncw/swift/auth.go new file mode 100644 index 000000000..ca35d2379 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/auth.go @@ -0,0 +1,283 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "strings" +) + +// Auth defines the operations needed to authenticate with swift +// +// This encapsulates the different authentication schemes in use +type Authenticator interface { + Request(*Connection) (*http.Request, error) + Response(resp *http.Response) error + // The public storage URL - set Internal to true to read + // internal/service net URL + StorageUrl(Internal bool) string + // The access token + Token() string + // The CDN url if available + CdnUrl() string +} + +// newAuth - create a new Authenticator from the AuthUrl +// +// A hint for AuthVersion can be provided +func newAuth(c *Connection) (Authenticator, error) { + AuthVersion := c.AuthVersion + if AuthVersion == 0 { + if strings.Contains(c.AuthUrl, "v3") { + AuthVersion = 3 + } else if strings.Contains(c.AuthUrl, "v2") { + AuthVersion = 2 + } else if strings.Contains(c.AuthUrl, "v1") { + AuthVersion = 1 + } else { + return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") + } + } + switch AuthVersion { + case 1: + return &v1Auth{}, nil + case 2: + return &v2Auth{ + // Guess as to whether using API key or + // password it will try both eventually so + // this is just an optimization. 
+ useApiKey: len(c.ApiKey) >= 32, + }, nil + case 3: + return &v3Auth{}, nil + } + return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) +} + +// ------------------------------------------------------------ + +// v1 auth +type v1Auth struct { + Headers http.Header // V1 auth: the authentication headers so extensions can access them +} + +// v1 Authentication - make request +func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { + req, err := http.NewRequest("GET", c.AuthUrl, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.UserAgent) + req.Header.Set("X-Auth-Key", c.ApiKey) + req.Header.Set("X-Auth-User", c.UserName) + return req, nil +} + +// v1 Authentication - read response +func (auth *v1Auth) Response(resp *http.Response) error { + auth.Headers = resp.Header + return nil +} + +// v1 Authentication - read storage url +func (auth *v1Auth) StorageUrl(Internal bool) string { + storageUrl := auth.Headers.Get("X-Storage-Url") + if Internal { + newUrl, err := url.Parse(storageUrl) + if err != nil { + return storageUrl + } + newUrl.Host = "snet-" + newUrl.Host + storageUrl = newUrl.String() + } + return storageUrl +} + +// v1 Authentication - read auth token +func (auth *v1Auth) Token() string { + return auth.Headers.Get("X-Auth-Token") +} + +// v1 Authentication - read cdn url +func (auth *v1Auth) CdnUrl() string { + return auth.Headers.Get("X-CDN-Management-Url") +} + +// ------------------------------------------------------------ + +// v2 Authentication +type v2Auth struct { + Auth *v2AuthResponse + Region string + useApiKey bool // if set will use API key not Password + useApiKeyOk bool // if set won't change useApiKey any more + notFirst bool // set after first run +} + +// v2 Authentication - make request +func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + // Toggle useApiKey if not first run and not OK yet + if auth.notFirst && !auth.useApiKeyOk { + auth.useApiKey = !auth.useApiKey + } + auth.notFirst = true + // Create a V2 auth request for the body of the connection + var v2i interface{} + if !auth.useApiKey { + // Normal swift authentication + v2 := v2AuthRequest{} + v2.Auth.PasswordCredentials.UserName = c.UserName + v2.Auth.PasswordCredentials.Password = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } else { + // Rackspace special with API Key + v2 := v2AuthRequestRackspace{} + v2.Auth.ApiKeyCredentials.UserName = c.UserName + v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } + body, err := json.Marshal(v2i) + if err != nil { + return nil, err + } + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + return req, nil +} + +// v2 Authentication - read response +func (auth *v2Auth) Response(resp *http.Response) error { + auth.Auth = new(v2AuthResponse) + err := readJson(resp, auth.Auth) + // If successfully read Auth then no need to toggle useApiKey any more + if err == nil { + auth.useApiKeyOk = true + } + return err +} + +// Finds the Endpoint Url of "type" from the v2AuthResponse using the +// Region if set or defaulting to the first one if not +// +// Returns "" if not found +func (auth *v2Auth) endpointUrl(Type string, Internal bool) string { + for _, catalog := range 
auth.Auth.Access.ServiceCatalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if auth.Region == "" || (auth.Region == endpoint.Region) { + if Internal { + return endpoint.InternalUrl + } else { + return endpoint.PublicUrl + } + } + } + } + } + return "" +} + +// v2 Authentication - read storage url +// +// If Internal is true then it reads the private (internal / service +// net) URL. +func (auth *v2Auth) StorageUrl(Internal bool) string { + return auth.endpointUrl("object-store", Internal) +} + +// v2 Authentication - read auth token +func (auth *v2Auth) Token() string { + return auth.Auth.Access.Token.Id +} + +// v2 Authentication - read cdn url +func (auth *v2Auth) CdnUrl() string { + return auth.endpointUrl("rax:object-cdn", false) +} + +// ------------------------------------------------------------ + +// V2 Authentication request +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequest struct { + Auth struct { + PasswordCredentials struct { + UserName string `json:"username"` + Password string `json:"password"` + } `json:"passwordCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication request - Rackspace variant +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequestRackspace struct { + Auth struct { + ApiKeyCredentials struct { + UserName string `json:"username"` + ApiKey string `json:"apiKey"` + } `json:"RAX-KSKEY:apiKeyCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication reply +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthResponse struct { + Access struct { + ServiceCatalog []struct { + Endpoints []struct { + InternalUrl string + PublicUrl string + Region string + TenantId string + } + Name string + Type string + } + Token struct { + Expires string + Id string + Tenant struct { + Id string + Name string + } + } + User struct { + DefaultRegion string `json:"RAX-AUTH:defaultRegion"` + Id string + Name string + Roles []struct { + Description string + Id string + Name string + TenantId string + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go b/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go new file mode 100644 index 000000000..efcb77e5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/auth_v3.go @@ -0,0 +1,207 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "strings" +) + +const ( + v3AuthMethodToken = "token" + v3AuthMethodPassword = "password" + v3InterfacePublic = "public" + v3InterfaceInternal = "internal" + v3InterfaceAdmin = "admin" + v3CatalogTypeObjectStore = "object-store" +) + +// V3 Authentication request +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// 
http://developer.openstack.org/api-ref-identity-v3.html +type v3AuthRequest struct { + Auth struct { + Identity struct { + Methods []string `json:"methods"` + Password *v3AuthPassword `json:"password,omitempty"` + Token *v3AuthToken `json:"token,omitempty"` + } `json:"identity"` + Scope *v3Scope `json:"scope,omitempty"` + } `json:"auth"` +} + +type v3Scope struct { + Project *v3Project `json:"project,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` +} + +type v3Domain struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +type v3Project struct { + Name string `json:"name,omitempty"` + Id string `json:"id,omitempty"` + Domain *v3Domain `json:"domain,omitempty"` +} + +type v3User struct { + Domain *v3Domain `json:"domain,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Password string `json:"password,omitempty"` +} + +type v3AuthToken struct { + Id string `json:"id"` +} + +type v3AuthPassword struct { + User v3User `json:"user"` +} + +// V3 Authentication response +type v3AuthResponse struct { + Token struct { + Expires_At, Issued_At string + Methods []string + Roles []map[string]string + + Project struct { + Domain struct { + Id, Name string + } + Id, Name string + } + + Catalog []struct { + Id, Namem, Type string + Endpoints []struct { + Id, Region_Id, Url, Region, Interface string + } + } + + User struct { + Id, Name string + Domain struct { + Id, Name string + Links struct { + Self string + } + } + } + + Audit_Ids []string + } +} + +type v3Auth struct { + Auth *v3AuthResponse + Headers http.Header +} + +func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { + + var v3i interface{} + + v3 := v3AuthRequest{} + + if c.UserName == "" { + v3.Auth.Identity.Methods = []string{v3AuthMethodToken} + v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} + } else { + v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} + v3.Auth.Identity.Password = &v3AuthPassword{ + User: v3User{ + Name: c.UserName, + Password: c.ApiKey, + }, + } + + var domain *v3Domain + + if c.Domain != "" { + domain = &v3Domain{Name: c.Domain} + } else if c.DomainId != "" { + domain = &v3Domain{Id: c.DomainId} + } + v3.Auth.Identity.Password.User.Domain = domain + } + + if c.TenantId != "" || c.Tenant != "" { + + v3.Auth.Scope = &v3Scope{Project: &v3Project{}} + + if c.TenantId != "" { + v3.Auth.Scope.Project.Id = c.TenantId + } else if c.Tenant != "" { + v3.Auth.Scope.Project.Name = c.Tenant + var defaultDomain v3Domain + if c.Domain != "" { + defaultDomain = v3Domain{Name: "Default"} + } else if c.DomainId != "" { + defaultDomain = v3Domain{Id: "Default"} + } + v3.Auth.Scope.Project.Domain = &defaultDomain + } + } + + v3i = v3 + + body, err := json.Marshal(v3i) + + if err != nil { + return nil, err + } + + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + return req, nil +} + +func (auth *v3Auth) Response(resp *http.Response) error { + auth.Auth = &v3AuthResponse{} + auth.Headers = resp.Header + err := readJson(resp, auth.Auth) + return err +} + +func (auth *v3Auth) endpointUrl(Type string, Internal bool) string { + for _, catalog := range auth.Auth.Token.Catalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if Internal { + if endpoint.Interface == v3InterfaceInternal { + return endpoint.Url + } + } else { 
+ if endpoint.Interface == v3InterfacePublic { + return endpoint.Url + } + } + } + } + } + return "" +} + +func (auth *v3Auth) StorageUrl(Internal bool) string { + return auth.endpointUrl(v3CatalogTypeObjectStore, Internal) +} + +func (auth *v3Auth) Token() string { + return auth.Headers.Get("X-Subject-Token") +} + +func (auth *v3Auth) CdnUrl() string { + return "" +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go b/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go new file mode 100644 index 000000000..7b69a757a --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_0.go @@ -0,0 +1,28 @@ +// Go 1.0 compatibility functions + +// +build !go1.1 + +package swift + +import ( + "log" + "net/http" + "time" +) + +// Cancel the request - doesn't work under < go 1.1 +func cancelRequest(transport http.RoundTripper, req *http.Request) { + log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") +} + +// Reset a timer - Doesn't work properly < go 1.1 +// +// This is quite hard to do properly under go < 1.1 so we do a crude +// approximation and hope that everyone upgrades to go 1.1 quickly +func resetTimer(t *time.Timer, d time.Duration) { + t.Stop() + // Very likely this doesn't actually work if we are already + // selecting on t.C. However we've stopped the original timer + // so won't break transfers but may not time them out :-( + *t = *time.NewTimer(d) +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go b/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go new file mode 100644 index 000000000..a4f9c3ab2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/compatibility_1_1.go @@ -0,0 +1,24 @@ +// Go 1.1 and later compatibility functions +// +// +build go1.1 + +package swift + +import ( + "net/http" + "time" +) + +// Cancel the request +func cancelRequest(transport http.RoundTripper, req *http.Request) { + if tr, ok := transport.(interface { + CancelRequest(*http.Request) + }); ok { + tr.CancelRequest(req) + } +} + +// Reset a timer +func resetTimer(t *time.Timer, d time.Duration) { + t.Reset(d) +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/doc.go b/Godeps/_workspace/src/github.com/ncw/swift/doc.go new file mode 100644 index 000000000..44efde7bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/doc.go @@ -0,0 +1,19 @@ +/* +Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files + +Standard Usage + +Most of the work is done through the Container*() and Object*() methods. + +All methods are safe to use concurrently in multiple go routines. + +Object Versioning + +As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. 
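The Object Versioning workflow just described can be sketched end to end. This is a minimal, hypothetical example only: the container and object names are made up, and it assumes the package's `ObjectPutString`, `ObjectGetString` and `ObjectDelete` helpers with the signatures used below, which are not part of this hunk.

```go
package main

import (
	"fmt"

	"github.com/ncw/swift"
)

func main() {
	// Placeholder credentials; see the README above for real auth URLs.
	c := swift.Connection{
		UserName: "user",
		ApiKey:   "key",
		AuthUrl:  "auth_url",
	}
	if err := c.Authenticate(); err != nil {
		panic(err)
	}

	// Create the current container "docs" backed by "docs-versions".
	if err := c.VersionContainerCreate("docs", "docs-versions"); err != nil {
		panic(err)
	}

	// Pushing the same object twice copies the first upload into "docs-versions".
	// ObjectPutString/ObjectGetString/ObjectDelete are assumed helpers from this package.
	_ = c.ObjectPutString("docs", "report.txt", "first draft", "text/plain")
	_ = c.ObjectPutString("docs", "report.txt", "second draft", "text/plain")

	// Deleting the current object promotes the previous version back into "docs".
	_ = c.ObjectDelete("docs", "report.txt")
	contents, _ := c.ObjectGetString("docs", "report.txt")
	fmt.Println(contents) // expected: "first draft"
}
```

Note that a single delete only removes the latest copy; as the paragraph above says, an object updated five times must be deleted five times to disappear completely.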
+ +Rackspace Sub Module + +This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. + +*/ +package swift diff --git a/Godeps/_workspace/src/github.com/ncw/swift/example_test.go b/Godeps/_workspace/src/github.com/ncw/swift/example_test.go new file mode 100644 index 000000000..30cb98348 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/example_test.go @@ -0,0 +1,97 @@ +// Copyright... + +// This example demonstrates opening a Connection and doing some basic operations. +package swift_test + +import ( + "fmt" + + "github.com/ncw/swift" +) + +func ExampleConnection() { + // Create a v1 auth connection + c := swift.Connection{ + // This should be your username + UserName: "user", + // This should be your api key + ApiKey: "key", + // This should be a v1 auth url, eg + // Rackspace US https://auth.api.rackspacecloud.com/v1.0 + // Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 + // Memset Memstore UK https://auth.storage.memset.com/v1.0 + AuthUrl: "auth_url", + } + + // Authenticate + err := c.Authenticate() + if err != nil { + panic(err) + } + // List all the containers + containers, err := c.ContainerNames(nil) + fmt.Println(containers) + // etc... + + // ------ or alternatively create a v2 connection ------ + + // Create a v2 auth connection + c = swift.Connection{ + // This is the sub user for the storage - eg "admin" + UserName: "user", + // This should be your api key + ApiKey: "key", + // This should be a version2 auth url, eg + // Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 + // Memset Memstore v2 https://auth.storage.memset.com/v2.0 + AuthUrl: "v2_auth_url", + // Region to use - default is use first region if unset + Region: "LON", + // Name of the tenant - this is likely your username + Tenant: "jim", + } + + // as above... +} + +var container string + +func ExampleConnection_ObjectsWalk() { + objects := make([]string, 0) + err := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { + newObjects, err := c.ObjectNames(container, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + fmt.Println("Found all the objects", objects, err) +} + +func ExampleConnection_VersionContainerCreate() { + // Use the helper method to create the current and versions container. + if err := c.VersionContainerCreate("cds", "cd-versions"); err != nil { + fmt.Print(err.Error()) + } +} + +func ExampleConnection_VersionEnable() { + // Build the containers manually and enable them. + if err := c.ContainerCreate("movie-versions", nil); err != nil { + fmt.Print(err.Error()) + } + if err := c.ContainerCreate("movies", nil); err != nil { + fmt.Print(err.Error()) + } + if err := c.VersionEnable("movies", "movie-versions"); err != nil { + fmt.Print(err.Error()) + } + + // Access the primary container as usual with ObjectCreate(), ObjectPut(), etc. + // etc... +} + +func ExampleConnection_VersionDisable() { + // Disable versioning on a container. Note that this does not delete the versioning container. 
+ c.VersionDisable("movies") +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/meta.go b/Godeps/_workspace/src/github.com/ncw/swift/meta.go new file mode 100644 index 000000000..e52d68608 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/meta.go @@ -0,0 +1,174 @@ +// Metadata manipulation in and out of Headers + +package swift + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +// Metadata stores account, container or object metadata. +type Metadata map[string]string + +// Metadata gets the Metadata starting with the metaPrefix out of the Headers. +// +// The keys in the Metadata will be converted to lower case +func (h Headers) Metadata(metaPrefix string) Metadata { + m := Metadata{} + metaPrefix = http.CanonicalHeaderKey(metaPrefix) + for key, value := range h { + if strings.HasPrefix(key, metaPrefix) { + metaKey := strings.ToLower(key[len(metaPrefix):]) + m[metaKey] = value + } + } + return m +} + +// AccountMetadata converts Headers from account to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) AccountMetadata() Metadata { + return h.Metadata("X-Account-Meta-") +} + +// ContainerMetadata converts Headers from container to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ContainerMetadata() Metadata { + return h.Metadata("X-Container-Meta-") +} + +// ObjectMetadata converts Headers from object to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ObjectMetadata() Metadata { + return h.Metadata("X-Object-Meta-") +} + +// Headers convert the Metadata starting with the metaPrefix into a +// Headers. +// +// The keys in the Metadata will be converted from lower case to http +// Canonical (see http.CanonicalHeaderKey). +func (m Metadata) Headers(metaPrefix string) Headers { + h := Headers{} + for key, value := range m { + key = http.CanonicalHeaderKey(metaPrefix + key) + h[key] = value + } + return h +} + +// AccountHeaders converts the Metadata for the account. +func (m Metadata) AccountHeaders() Headers { + return m.Headers("X-Account-Meta-") +} + +// ContainerHeaders converts the Metadata for the container. +func (m Metadata) ContainerHeaders() Headers { + return m.Headers("X-Container-Meta-") +} + +// ObjectHeaders converts the Metadata for the object. +func (m Metadata) ObjectHeaders() Headers { + return m.Headers("X-Object-Meta-") +} + +// Turns a number of ns into a floating point string in seconds +// +// Trims trailing zeros and guaranteed to be perfectly accurate +func nsToFloatString(ns int64) string { + if ns < 0 { + return "-" + nsToFloatString(-ns) + } + result := fmt.Sprintf("%010d", ns) + split := len(result) - 9 + result, decimals := result[:split], result[split:] + decimals = strings.TrimRight(decimals, "0") + if decimals != "" { + result += "." 
+ result += decimals + } + return result +} + +// Turns a floating point string in seconds into a ns integer +// +// Guaranteed to be perfectly accurate +func floatStringToNs(s string) (int64, error) { + const zeros = "000000000" + if point := strings.IndexRune(s, '.'); point >= 0 { + tail := s[point+1:] + if fill := 9 - len(tail); fill < 0 { + tail = tail[:9] + } else { + tail += zeros[:fill] + } + s = s[:point] + tail + } else if len(s) > 0 { // Make sure empty string produces an error + s += zeros + } + return strconv.ParseInt(s, 10, 64) +} + +// FloatStringToTime converts a floating point number string to a time.Time +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number should be in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z" +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +// +// If an error is returned then time will be returned as the zero time. +func FloatStringToTime(s string) (t time.Time, err error) { + ns, err := floatStringToNs(s) + if err != nil { + return + } + t = time.Unix(0, ns) + return +} + +// TimeToFloatString converts a time.Time object to a floating point string +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number is in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped +// from the output. +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +func TimeToFloatString(t time.Time) string { + return nsToFloatString(t.UnixNano()) +} + +// Read a modification time (mtime) from a Metadata object +// +// This is a defacto standard (used in the official python-swiftclient +// amongst others) for storing the modification time (as read using +// os.Stat) for an object. It is stored using the key 'mtime', which +// for example when written to an object will be 'X-Object-Meta-Mtime'. +// +// If an error is returned then time will be returned as the zero time. +func (m Metadata) GetModTime() (t time.Time, err error) { + return FloatStringToTime(m["mtime"]) +} + +// Write an modification time (mtime) to a Metadata object +// +// This is a defacto standard (used in the official python-swiftclient +// amongst others) for storing the modification time (as read using +// os.Stat) for an object. It is stored using the key 'mtime', which +// for example when written to an object will be 'X-Object-Meta-Mtime'. 
+func (m Metadata) SetModTime(t time.Time) { + m["mtime"] = TimeToFloatString(t) +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go b/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go new file mode 100644 index 000000000..47560d576 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/meta_test.go @@ -0,0 +1,213 @@ +// Tests for swift metadata +package swift + +import ( + "testing" + "time" +) + +func TestHeadersToMetadata(t *testing.T) { +} + +func TestHeadersToAccountMetadata(t *testing.T) { +} + +func TestHeadersToContainerMetadata(t *testing.T) { +} + +func TestHeadersToObjectMetadata(t *testing.T) { +} + +func TestMetadataToHeaders(t *testing.T) { +} + +func TestMetadataToAccountHeaders(t *testing.T) { +} + +func TestMetadataToContainerHeaders(t *testing.T) { +} + +func TestMetadataToObjectHeaders(t *testing.T) { +} + +func TestNsToFloatString(t *testing.T) { + for _, d := range []struct { + ns int64 + fs string + }{ + {0, "0"}, + {1, "0.000000001"}, + {1000, "0.000001"}, + {1000000, "0.001"}, + {100000000, "0.1"}, + {1000000000, "1"}, + {10000000000, "10"}, + {12345678912, "12.345678912"}, + {12345678910, "12.34567891"}, + {12345678900, "12.3456789"}, + {12345678000, "12.345678"}, + {12345670000, "12.34567"}, + {12345600000, "12.3456"}, + {12345000000, "12.345"}, + {12340000000, "12.34"}, + {12300000000, "12.3"}, + {12000000000, "12"}, + {10000000000, "10"}, + {1347717491123123123, "1347717491.123123123"}, + } { + if nsToFloatString(d.ns) != d.fs { + t.Error("Failed", d.ns, "!=", d.fs) + } + if d.ns > 0 && nsToFloatString(-d.ns) != "-"+d.fs { + t.Error("Failed on negative", d.ns, "!=", d.fs) + } + } +} + +func TestFloatStringToNs(t *testing.T) { + for _, d := range []struct { + ns int64 + fs string + }{ + {0, "0"}, + {0, "0."}, + {0, ".0"}, + {0, "0.0"}, + {0, "0.0000000001"}, + {1, "0.000000001"}, + {1000, "0.000001"}, + {1000000, "0.001"}, + {100000000, "0.1"}, + {100000000, "0.10"}, + {100000000, "0.1000000001"}, + {1000000000, "1"}, + {1000000000, "1."}, + {1000000000, "1.0"}, + {10000000000, "10"}, + {12345678912, "12.345678912"}, + {12345678912, "12.3456789129"}, + {12345678912, "12.34567891299"}, + {12345678910, "12.34567891"}, + {12345678900, "12.3456789"}, + {12345678000, "12.345678"}, + {12345670000, "12.34567"}, + {12345600000, "12.3456"}, + {12345000000, "12.345"}, + {12340000000, "12.34"}, + {12300000000, "12.3"}, + {12000000000, "12"}, + {10000000000, "10"}, + // This is a typical value which has more bits in than a float64 + {1347717491123123123, "1347717491.123123123"}, + } { + ns, err := floatStringToNs(d.fs) + if err != nil { + t.Error("Failed conversion", err) + } + if ns != d.ns { + t.Error("Failed", d.fs, "!=", d.ns, "was", ns) + } + if d.ns > 0 { + ns, err := floatStringToNs("-" + d.fs) + if err != nil { + t.Error("Failed conversion", err) + } + if ns != -d.ns { + t.Error("Failed on negative", -d.ns, "!=", "-"+d.fs) + } + } + } + + // These are expected to produce errors + for _, fs := range []string{ + "", + " 1", + "- 1", + "- 1", + "1.-1", + "1.0.0", + "1x0", + } { + ns, err := floatStringToNs(fs) + if err == nil { + t.Error("Didn't produce expected error", fs, ns) + } + } + +} + +func TestGetModTime(t *testing.T) { + for _, d := range []struct { + ns string + t string + }{ + {"1354040105", "2012-11-27T18:15:05Z"}, + {"1354040105.", "2012-11-27T18:15:05Z"}, + {"1354040105.0", "2012-11-27T18:15:05Z"}, + {"1354040105.000000000000", "2012-11-27T18:15:05Z"}, + {"1354040105.123", "2012-11-27T18:15:05.123Z"}, + 
{"1354040105.123456", "2012-11-27T18:15:05.123456Z"}, + {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, + {"1354040105.123456789123", "2012-11-27T18:15:05.123456789Z"}, + {"0", "1970-01-01T00:00:00.000000000Z"}, + } { + expected, err := time.Parse(time.RFC3339, d.t) + if err != nil { + t.Error("Bad test", err) + } + m := Metadata{"mtime": d.ns} + actual, err := m.GetModTime() + if err != nil { + t.Error("Parse error", err) + } + if !actual.Equal(expected) { + t.Error("Expecting", expected, expected.UnixNano(), "got", actual, actual.UnixNano()) + } + } + for _, ns := range []string{ + "EMPTY", + "", + " 1", + "- 1", + "- 1", + "1.-1", + "1.0.0", + "1x0", + } { + m := Metadata{} + if ns != "EMPTY" { + m["mtime"] = ns + } + actual, err := m.GetModTime() + if err == nil { + t.Error("Expected error not produced") + } + if !actual.IsZero() { + t.Error("Expected output to be zero") + } + } +} + +func TestSetModTime(t *testing.T) { + for _, d := range []struct { + ns string + t string + }{ + {"1354040105", "2012-11-27T18:15:05Z"}, + {"1354040105", "2012-11-27T18:15:05.000000Z"}, + {"1354040105.123", "2012-11-27T18:15:05.123Z"}, + {"1354040105.123456", "2012-11-27T18:15:05.123456Z"}, + {"1354040105.123456789", "2012-11-27T18:15:05.123456789Z"}, + {"0", "1970-01-01T00:00:00.000000000Z"}, + } { + time, err := time.Parse(time.RFC3339, d.t) + if err != nil { + t.Error("Bad test", err) + } + m := Metadata{} + m.SetModTime(time) + if m["mtime"] != d.ns { + t.Error("mtime wrong", m, "should be", d.ns) + } + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/notes.txt b/Godeps/_workspace/src/github.com/ncw/swift/notes.txt new file mode 100644 index 000000000..f738552cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/notes.txt @@ -0,0 +1,55 @@ +Notes on Go Swift +================= + +Make a builder style interface like the Google Go APIs? Advantages +are that it is easy to add named methods to the service object to do +specific things. Slightly less efficient. Not sure about how to +return extra stuff though - in an object? + +Make a container struct so these could be methods on it? + +Make noResponse check for 204? + +Make storage public so it can be extended easily? + +Rename to go-swift to match user agent string? + +Reconnect on auth error - 401 when token expires isn't tested + +Make more api compatible with python cloudfiles? + +Retry operations on timeout / network errors? +- also 408 error +- GET requests only? + +Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock + +Add extra headers field to Connection (for via etc) + +Make errors use an error heirachy then can catch them with a type assertion + + Error(...) + ObjectCorrupted{ Error } + +Make a Debug flag in connection for logging stuff + +Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc + +Object range + +Object create, update with X-Delete-At or X-Delete-After + +Large object support +- check uploads are less than 5GB in normal mode? + +Access control CORS? + +Swift client retries and backs off for all types of errors + +Implement net error interface? + +type Error interface { + error + Timeout() bool // Is the error a timeout? + Temporary() bool // Is the error temporary? 
+} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go b/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go new file mode 100644 index 000000000..34ee15a0b --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/rs/rs.go @@ -0,0 +1,83 @@ +package rs + +import ( + "errors" + "net/http" + "strconv" + + "github.com/ncw/swift" +) + +// RsConnection is a RackSpace specific wrapper to the core swift library which +// exposes the RackSpace CDN commands via the CDN Management URL interface. +type RsConnection struct { + swift.Connection + cdnUrl string +} + +// manage is similar to the swift storage method, but uses the CDN Management URL for CDN specific calls. +func (c *RsConnection) manage(p swift.RequestOpts) (resp *http.Response, headers swift.Headers, err error) { + p.OnReAuth = func() (string, error) { + if c.cdnUrl == "" { + c.cdnUrl = c.Auth.CdnUrl() + } + if c.cdnUrl == "" { + return "", errors.New("The X-CDN-Management-Url does not exist on the authenticated platform") + } + return c.cdnUrl, nil + } + if c.Authenticated() { + _, err = p.OnReAuth() + if err != nil { + return nil, nil, err + } + } + return c.Connection.Call(c.cdnUrl, p) +} + +// ContainerCDNEnable enables a container for public CDN usage. +// +// Change the default TTL of 259200 seconds (72 hours) by passing in an integer value. +// +// This method can be called again to change the TTL. +func (c *RsConnection) ContainerCDNEnable(container string, ttl int) (swift.Headers, error) { + h := swift.Headers{"X-CDN-Enabled": "true"} + if ttl > 0 { + h["X-TTL"] = strconv.Itoa(ttl) + } + + _, headers, err := c.manage(swift.RequestOpts{ + Container: container, + Operation: "PUT", + ErrorMap: swift.ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return headers, err +} + +// ContainerCDNDisable disables CDN access to a container. +func (c *RsConnection) ContainerCDNDisable(container string) error { + h := swift.Headers{"X-CDN-Enabled": "false"} + + _, _, err := c.manage(swift.RequestOpts{ + Container: container, + Operation: "PUT", + ErrorMap: swift.ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerCDNMeta returns the CDN metadata for a container. +func (c *RsConnection) ContainerCDNMeta(container string) (swift.Headers, error) { + _, headers, err := c.manage(swift.RequestOpts{ + Container: container, + Operation: "HEAD", + ErrorMap: swift.ContainerErrorMap, + NoResponse: true, + Headers: swift.Headers{}, + }) + return headers, err +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go b/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go new file mode 100644 index 000000000..74205154c --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/rs/rs_test.go @@ -0,0 +1,96 @@ +// See swift_test.go for requirements to run this test. +package rs_test + +import ( + "os" + "testing" + + "github.com/ncw/swift/rs" +) + +var ( + c rs.RsConnection +) + +const ( + CONTAINER = "GoSwiftUnitTest" + OBJECT = "test_object" + CONTENTS = "12345" + CONTENT_SIZE = int64(len(CONTENTS)) + CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" +) + +// Test functions are run in order - this one must be first! 
+func TestAuthenticate(t *testing.T) { + UserName := os.Getenv("SWIFT_API_USER") + ApiKey := os.Getenv("SWIFT_API_KEY") + AuthUrl := os.Getenv("SWIFT_AUTH_URL") + if UserName == "" || ApiKey == "" || AuthUrl == "" { + t.Fatal("SWIFT_API_USER, SWIFT_API_KEY and SWIFT_AUTH_URL not all set") + } + c = rs.RsConnection{} + c.UserName = UserName + c.ApiKey = ApiKey + c.AuthUrl = AuthUrl + err := c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +// Setup +func TestContainerCreate(t *testing.T) { + err := c.ContainerCreate(CONTAINER, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestCDNEnable(t *testing.T) { + headers, err := c.ContainerCDNEnable(CONTAINER, 0) + if err != nil { + t.Error(err) + } + if _, ok := headers["X-Cdn-Uri"]; !ok { + t.Error("Failed to enable CDN for container") + } +} + +func TestOnReAuth(t *testing.T) { + c2 := rs.RsConnection{} + c2.UserName = c.UserName + c2.ApiKey = c.ApiKey + c2.AuthUrl = c.AuthUrl + _, err := c2.ContainerCDNEnable(CONTAINER, 0) + if err != nil { + t.Fatalf("Failed to reauthenticate: %v", err) + } +} + +func TestCDNMeta(t *testing.T) { + headers, err := c.ContainerCDNMeta(CONTAINER) + if err != nil { + t.Error(err) + } + if _, ok := headers["X-Cdn-Uri"]; !ok { + t.Error("CDN is not enabled") + } +} + +func TestCDNDisable(t *testing.T) { + err := c.ContainerCDNDisable(CONTAINER) // files stick in CDN until TTL expires + if err != nil { + t.Error(err) + } +} + +// Teardown +func TestContainerDelete(t *testing.T) { + err := c.ContainerDelete(CONTAINER) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/swift.go b/Godeps/_workspace/src/github.com/ncw/swift/swift.go new file mode 100644 index 000000000..db8eba106 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/swift.go @@ -0,0 +1,1841 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/md5" + "encoding/json" + "fmt" + "hash" + "io" + "mime" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DefaultUserAgent = "goswift/1.0" // Default user agent + DefaultRetries = 3 // Default number of retries on token expiry + TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC + UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). + UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). + UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). + allContainersLimit = 10000 // Number of containers to fetch at once + allObjectsLimit = 10000 // Number objects to fetch at once + allObjectsChanLimit = 1000 // ...when fetching to a channel +) + +// Connection holds the details of the connection to the swift server. +// +// You need to provide UserName, ApiKey and AuthUrl when you create a +// connection then call Authenticate on it. +// +// The auth version in use will be detected from the AuthURL - you can +// override this with the AuthVersion parameter. +// +// If using v2 auth you can also set Region in the Connection +// structure. If you don't set Region you will get the default region +// which may not be what you want. 
+// +// For reference some common AuthUrls looks like this: +// +// Rackspace US https://auth.api.rackspacecloud.com/v1.0 +// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 +// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 +// Memset Memstore UK https://auth.storage.memset.com/v1.0 +// Memstore v2 https://auth.storage.memset.com/v2.0 +// +// When using Google Appengine you must provide the Connection with an +// appengine-specific Transport: +// +// import ( +// "appengine/urlfetch" +// "fmt" +// "github.com/ncw/swift" +// ) +// +// func handler(w http.ResponseWriter, r *http.Request) { +// ctx := appengine.NewContext(r) +// tr := urlfetch.Transport{Context: ctx} +// c := swift.Connection{ +// UserName: "user", +// ApiKey: "key", +// AuthUrl: "auth_url", +// Transport: tr, +// } +// _ := c.Authenticate() +// containers, _ := c.ContainerNames(nil) +// fmt.Fprintf(w, "containers: %q", containers) +// } +// +// If you don't supply a Transport, one is made which relies on +// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). +// This means that the connection will respect the HTTP proxy specified by the +// environment variables $HTTP_PROXY and $NO_PROXY. +type Connection struct { + // Parameters - fill these in before calling Authenticate + // They are all optional except UserName, ApiKey and AuthUrl + Domain string // User's domain name + DomainId string // User's domain Id + UserName string // UserName for api + ApiKey string // Key for api access + AuthUrl string // Auth URL + Retries int // Retries on error (default is 3) + UserAgent string // Http User agent (default goswift/1.0) + ConnectTimeout time.Duration // Connect channel timeout (default 10s) + Timeout time.Duration // Data channel timeout (default 60s) + Region string // Region to use eg "LON", "ORD" - default is use first region (V2 auth only) + AuthVersion int // Set to 1 or 2 or leave at 0 for autodetect + Internal bool // Set this to true to use the the internal / service network + Tenant string // Name of the tenant (v2 auth only) + TenantId string // Id of the tenant (v2 auth only) + Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) + // These are filled in after Authenticate is called as are the defaults for above + StorageUrl string + AuthToken string + client *http.Client + Auth Authenticator `json:"-" xml:"-"` // the current authenticator + authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth +} + +// Error - all errors generated by this package are of this type. Other error +// may be passed on from library functions though. +type Error struct { + StatusCode int // HTTP status code if relevant or 0 if not + Text string +} + +// Error satisfy the error interface. +func (e *Error) Error() string { + return e.Text +} + +// newError make a new error from a string. +func newError(StatusCode int, Text string) *Error { + return &Error{ + StatusCode: StatusCode, + Text: Text, + } +} + +// newErrorf makes a new error from sprintf parameters. +func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error { + return newError(StatusCode, fmt.Sprintf(Text, Parameters...)) +} + +// errorMap defines http error codes to error mappings. 
+type errorMap map[int]error + +var ( + // Specific Errors you might want to check for equality + BadRequest = newError(400, "Bad Request") + AuthorizationFailed = newError(401, "Authorization Failed") + ContainerNotFound = newError(404, "Container Not Found") + ContainerNotEmpty = newError(409, "Container Not Empty") + ObjectNotFound = newError(404, "Object Not Found") + ObjectCorrupted = newError(422, "Object Corrupted") + TimeoutError = newError(408, "Timeout when reading or writing data") + Forbidden = newError(403, "Operation forbidden") + TooLargeObject = newError(413, "Too Large Object") + + // Mappings for authentication errors + authErrorMap = errorMap{ + 400: BadRequest, + 401: AuthorizationFailed, + 403: Forbidden, + } + + // Mappings for container errors + ContainerErrorMap = errorMap{ + 400: BadRequest, + 403: Forbidden, + 404: ContainerNotFound, + 409: ContainerNotEmpty, + } + + // Mappings for object errors + objectErrorMap = errorMap{ + 400: BadRequest, + 403: Forbidden, + 404: ObjectNotFound, + 413: TooLargeObject, + 422: ObjectCorrupted, + } +) + +// checkClose is used to check the return from Close in a defer +// statement. +func checkClose(c io.Closer, err *error) { + cerr := c.Close() + if *err == nil { + *err = cerr + } +} + +// parseHeaders checks a response for errors and translates into +// standard errors if necessary. +func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { + if errorMap != nil { + if err, ok := errorMap[resp.StatusCode]; ok { + return err + } + } + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) + } + return nil +} + +// readHeaders returns a Headers object from the http.Response. +// +// If it receives multiple values for a key (which should never +// happen) it will use the first one +func readHeaders(resp *http.Response) Headers { + headers := Headers{} + for key, values := range resp.Header { + headers[key] = values[0] + } + return headers +} + +// Headers stores HTTP headers (can only have one of each header like Swift). 
+type Headers map[string]string + +// Does an http request using the running timer passed in +func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { + // Do the request in the background so we can check the timeout + type result struct { + resp *http.Response + err error + } + done := make(chan result, 1) + go func() { + resp, err := c.client.Do(req) + done <- result{resp, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.resp, r.err + case <-timer.C: + // Kill the connection on timeout so we don't leak sockets or goroutines + cancelRequest(c.Transport, req) + return nil, TimeoutError + } + panic("unreachable") // For Go 1.0 +} + +// Set defaults for any unset values +// +// Call with authLock held +func (c *Connection) setDefaults() { + if c.UserAgent == "" { + c.UserAgent = DefaultUserAgent + } + if c.Retries == 0 { + c.Retries = DefaultRetries + } + if c.ConnectTimeout == 0 { + c.ConnectTimeout = 10 * time.Second + } + if c.Timeout == 0 { + c.Timeout = 60 * time.Second + } + if c.Transport == nil { + c.Transport = &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + } + } + if c.client == nil { + c.client = &http.Client{ + // CheckRedirect: redirectPolicyFunc, + Transport: c.Transport, + } + } +} + +// Authenticate connects to the Swift server. +// +// If you don't call it before calling one of the connection methods +// then it will be called for you on the first access. +func (c *Connection) Authenticate() (err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticate() +} + +// Internal implementation of Authenticate +// +// Call with authLock held +func (c *Connection) authenticate() (err error) { + c.setDefaults() + + // Flush the keepalives connection - if we are + // re-authenticating then stuff has gone wrong + flushKeepaliveConnections(c.Transport) + + if c.Auth == nil { + c.Auth, err = newAuth(c) + if err != nil { + return + } + } + + retries := 1 +again: + var req *http.Request + req, err = c.Auth.Request(c) + if err != nil { + return + } + timer := time.NewTimer(c.ConnectTimeout) + var resp *http.Response + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + return + } + defer func() { + checkClose(resp.Body, &err) + // Flush the auth connection - we don't want to keep + // it open if keepalives were enabled + flushKeepaliveConnections(c.Transport) + }() + if err = c.parseHeaders(resp, authErrorMap); err != nil { + // Try again for a limited number of times on + // AuthorizationFailed or BadRequest. 
This allows us + // to try some alternate forms of the request + if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { + retries-- + goto again + } + return + } + err = c.Auth.Response(resp) + if err != nil { + return + } + c.StorageUrl = c.Auth.StorageUrl(c.Internal) + c.AuthToken = c.Auth.Token() + if !c.authenticated() { + err = newError(0, "Response didn't have storage url and auth token") + return + } + return +} + +// Get an authToken and url +// +// The Url may be updated if it needed to authenticate using the OnReAuth function +func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + targetUrlOut = targetUrlIn + if !c.authenticated() { + err = c.authenticate() + if err != nil { + return + } + if OnReAuth != nil { + targetUrlOut, err = OnReAuth() + if err != nil { + return + } + } + } + authToken = c.AuthToken + return +} + +// flushKeepaliveConnections is called to flush pending requests after an error. +func flushKeepaliveConnections(transport http.RoundTripper) { + if tr, ok := transport.(interface { + CloseIdleConnections() + }); ok { + tr.CloseIdleConnections() + } +} + +// UnAuthenticate removes the authentication from the Connection. +func (c *Connection) UnAuthenticate() { + c.authLock.Lock() + c.StorageUrl = "" + c.AuthToken = "" + c.authLock.Unlock() +} + +// Authenticated returns a boolean to show if the current connection +// is authenticated. +// +// Doesn't actually check the credentials against the server. +func (c *Connection) Authenticated() bool { + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticated() +} + +// Internal version of Authenticated() +// +// Call with authLock held +func (c *Connection) authenticated() bool { + return c.StorageUrl != "" && c.AuthToken != "" +} + +// RequestOpts contains parameters for Connection.storage. +type RequestOpts struct { + Container string + ObjectName string + Operation string + Parameters url.Values + Headers Headers + ErrorMap errorMap + NoResponse bool + Body io.Reader + Retries int + // if set this is called on re-authentication to refresh the targetUrl + OnReAuth func() (string, error) +} + +// Call runs a remote command on the targetUrl, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the targetUrl +// +// Returns a response or an error. If response is returned then +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +// +// This method is exported so extensions can call it. 
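+//
+// A hedged sketch of calling it directly from an extension (the container
+// name is illustrative; normal users should prefer the higher level methods):
+//
+//	_, _, err := c.Call(c.StorageUrl, RequestOpts{
+//		Container:  "mycontainer",
+//		Operation:  "HEAD",
+//		ErrorMap:   ContainerErrorMap,
+//		NoResponse: true,
+//	})
+//	if err == ContainerNotFound {
+//		// handle the missing container
+//	}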
+func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) { + c.authLock.Lock() + c.setDefaults() + c.authLock.Unlock() + retries := p.Retries + if retries == 0 { + retries = c.Retries + } + var req *http.Request + for { + var authToken string + targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth) + + var URL *url.URL + URL, err = url.Parse(targetUrl) + if err != nil { + return + } + if p.Container != "" { + URL.Path += "/" + p.Container + if p.ObjectName != "" { + URL.Path += "/" + p.ObjectName + } + } + if p.Parameters != nil { + URL.RawQuery = p.Parameters.Encode() + } + timer := time.NewTimer(c.ConnectTimeout) + reader := p.Body + if reader != nil { + reader = newWatchdogReader(reader, c.Timeout, timer) + } + req, err = http.NewRequest(p.Operation, URL.String(), reader) + if err != nil { + return + } + if p.Headers != nil { + for k, v := range p.Headers { + req.Header.Add(k, v) + } + } + req.Header.Add("User-Agent", DefaultUserAgent) + req.Header.Add("X-Auth-Token", authToken) + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + if p.Operation == "HEAD" || p.Operation == "GET" { + retries-- + continue + } + return + } + // Check to see if token has expired + if resp.StatusCode == 401 && retries > 0 { + _ = resp.Body.Close() + c.UnAuthenticate() + retries-- + } else { + break + } + } + + if err = c.parseHeaders(resp, p.ErrorMap); err != nil { + _ = resp.Body.Close() + return nil, nil, err + } + headers = readHeaders(resp) + if p.NoResponse { + err = resp.Body.Close() + if err != nil { + return nil, nil, err + } + } else { + // Cancel the request on timeout + cancel := func() { + cancelRequest(c.Transport, req) + } + // Wrap resp.Body to make it obey an idle timeout + resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel) + } + return +} + +// storage runs a remote command on a the storage url, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the storage url +// +// Returns a response or an error. If response is returned then +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) { + p.OnReAuth = func() (string, error) { + return c.StorageUrl, nil + } + c.authLock.Lock() + url := c.StorageUrl + c.authLock.Unlock() + return c.Call(url, p) +} + +// readLines reads the response into an array of strings. 
+// +// Closes the response when done +func readLines(resp *http.Response) (lines []string, err error) { + defer checkClose(resp.Body, &err) + reader := bufio.NewReader(resp.Body) + buffer := bytes.NewBuffer(make([]byte, 0, 128)) + var part []byte + var prefix bool + for { + if part, prefix, err = reader.ReadLine(); err != nil { + break + } + buffer.Write(part) + if !prefix { + lines = append(lines, buffer.String()) + buffer.Reset() + } + } + if err == io.EOF { + err = nil + } + return +} + +// readJson reads the response into the json type passed in +// +// Closes the response when done +func readJson(resp *http.Response, result interface{}) (err error) { + defer checkClose(resp.Body, &err) + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(result) +} + +/* ------------------------------------------------------------ */ + +// ContainersOpts is options for Containers() and ContainerNames() +type ContainersOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Marker string // Given a string value x, return object names greater in value than the specified marker. + EndMarker string // Given a string value x, return container names less in value than the specified marker. + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse the ContainerOpts +func (opts *ContainersOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + h = opts.Headers + } + return v, h +} + +// ContainerNames returns a slice of names of containers in this account. +func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + lines, err := readLines(resp) + return lines, err +} + +// Container contains information about a container +type Container struct { + Name string // Name of the container + Count int64 // Number of objects in the container + Bytes int64 // Total number of bytes used in the container +} + +// Containers returns a slice of structures with full information as +// described in Container. 
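+//
+// A minimal sketch of listing them (error handling trimmed for brevity):
+//
+//	containers, err := c.Containers(&ContainersOpts{Limit: 100})
+//	if err == nil {
+//		for _, container := range containers {
+//			fmt.Printf("%s: %d objects, %d bytes\n", container.Name, container.Count, container.Bytes)
+//		}
+//	}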
+func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var containers []Container + err = readJson(resp, &containers) + return containers, err +} + +// containersAllOpts makes a copy of opts if set or makes a new one and +// overrides Limit and Marker +func containersAllOpts(opts *ContainersOpts) *ContainersOpts { + var newOpts ContainersOpts + if opts != nil { + newOpts = *opts + } + if newOpts.Limit == 0 { + newOpts.Limit = allContainersLimit + } + newOpts.Marker = "" + return &newOpts +} + +// ContainersAll is like Containers but it returns all the Containers +// +// It calls Containers multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) { + opts = containersAllOpts(opts) + containers := make([]Container, 0) + for { + newContainers, err := c.Containers(opts) + if err != nil { + return nil, err + } + containers = append(containers, newContainers...) + if len(newContainers) < opts.Limit { + break + } + opts.Marker = newContainers[len(newContainers)-1].Name + } + return containers, nil +} + +// ContainerNamesAll is like ContainerNamess but it returns all the Containers +// +// It calls ContainerNames multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) { + opts = containersAllOpts(opts) + containers := make([]string, 0) + for { + newContainers, err := c.ContainerNames(opts) + if err != nil { + return nil, err + } + containers = append(containers, newContainers...) + if len(newContainers) < opts.Limit { + break + } + opts.Marker = newContainers[len(newContainers)-1] + } + return containers, nil +} + +/* ------------------------------------------------------------ */ + +// ObjectOpts is options for Objects() and ObjectNames() +type ObjectsOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Marker string // Given a string value x, return object names greater in value than the specified marker. + EndMarker string // Given a string value x, return object names less in value than the specified marker + Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x. + Path string // For a string value x, return the object names nested in the pseudo path + Delimiter rune // For a character c, return all the object names nested in the container + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse reads values out of ObjectsOpts +func (opts *ObjectsOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Path != "" { + v.Set("path", opts.Path) + } + if opts.Delimiter != 0 { + v.Set("delimiter", string(opts.Delimiter)) + } + h = opts.Headers + } + return v, h +} + +// ObjectNames returns a slice of names of objects in a given container. 
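+//
+// For example, to list only the top level "directories" under a prefix
+// (the container name and prefix are illustrative):
+//
+//	names, err := c.ObjectNames("mycontainer", &ObjectsOpts{
+//		Prefix:    "photos/",
+//		Delimiter: '/',
+//	})
+//	// check err before using names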
+func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + return readLines(resp) +} + +// Object contains information about an object +type Object struct { + Name string `json:"name"` // object name + ContentType string `json:"content_type"` // eg application/directory + Bytes int64 `json:"bytes"` // size in bytes + ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server + LastModified time.Time // Last modified time converted to a time.Time + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist + SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" +} + +// Objects returns a slice of Object with information about each +// object in the container. +// +// If Delimiter is set in the opts then PseudoDirectory may be set, +// with ContentType 'application/directory'. These are not real +// objects but represent directories of objects which haven't had an +// object created for them. +func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var objects []Object + err = readJson(resp, &objects) + // Convert Pseudo directories and dates + for i := range objects { + object := &objects[i] + if object.SubDir != "" { + object.Name = object.SubDir + object.PseudoDirectory = true + object.ContentType = "application/directory" + } + if object.ServerLastModified != "" { + // 2012-11-11T14:49:47.887250 + // + // Remove fractional seconds if present. This + // then keeps it consistent with Object + // which can only return timestamps accurate + // to 1 second + // + // The TimeFormat will parse fractional + // seconds if desired though + datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0] + object.LastModified, err = time.Parse(TimeFormat, datetime) + if err != nil { + return nil, err + } + } + } + return objects, err +} + +// objectsAllOpts makes a copy of opts if set or makes a new one and +// overrides Limit and Marker +func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts { + var newOpts ObjectsOpts + if opts != nil { + newOpts = *opts + } + if newOpts.Limit == 0 { + newOpts.Limit = Limit + } + newOpts.Marker = "" + return &newOpts +} + +// A closure defined by the caller to iterate through all objects +// +// Call Objects or ObjectNames from here with the *ObjectOpts passed in +// +// Do whatever is required with the results then return them +type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error) + +// ObjectsWalk is uses to iterate through all the objects in chunks as +// returned by Objects or ObjectNames using the Marker and Limit +// parameters in the ObjectsOpts. +// +// Pass in a closure `walkFn` which calls Objects or ObjectNames with +// the *ObjectsOpts passed to it and does something with the results. 
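+//
+// A sketch of a typical walk (the container name is illustrative):
+//
+//	err := c.ObjectsWalk("mycontainer", nil, func(opts *ObjectsOpts) (interface{}, error) {
+//		names, err := c.ObjectNames("mycontainer", opts)
+//		// do something with names here
+//		return names, err
+//	})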
+// +// Errors will be returned from this function +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error { + opts = objectsAllOpts(opts, allObjectsChanLimit) + for { + objects, err := walkFn(opts) + if err != nil { + return err + } + var n int + var last string + switch objects := objects.(type) { + case []string: + n = len(objects) + if n > 0 { + last = objects[len(objects)-1] + } + case []Object: + n = len(objects) + if n > 0 { + last = objects[len(objects)-1].Name + } + default: + panic("Unknown type returned to ObjectsWalk") + } + if n < opts.Limit { + break + } + opts.Marker = last + } + return nil +} + +// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice +// +// It calls Objects multiple times using the Marker parameter +func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) { + objects := make([]Object, 0) + err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { + newObjects, err := c.Objects(container, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + return objects, err +} + +// ObjectNamesAll is like ObjectNames but it returns all the Objects +// +// It calls ObjectNames multiple times using the Marker parameter +// +// It has a default Limit parameter but you may pass in your own +func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) { + objects := make([]string, 0) + err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { + newObjects, err := c.ObjectNames(container, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + return objects, err +} + +// Account contains information about this account. +type Account struct { + BytesUsed int64 // total number of bytes used + Containers int64 // total number of containers + Objects int64 // total number of objects +} + +// getInt64FromHeader is a helper function to decode int64 from header. +func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { + value := resp.Header.Get(header) + result, err = strconv.ParseInt(value, 10, 64) + if err != nil { + err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) + } + return +} + +// Account returns info about the account in an Account struct. +func (c *Connection) Account() (info Account, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into a dict + // + // {'Accept-Ranges': 'bytes', + // 'Content-Length': '0', + // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT', + // 'X-Account-Bytes-Used': '316598182', + // 'X-Account-Container-Count': '4', + // 'X-Account-Object-Count': '1433'} + if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil { + return + } + if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil { + return + } + if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil { + return + } + return +} + +// AccountUpdate adds, replaces or remove account metadata. +// +// Add or update keys by mentioning them in the Headers. +// +// Remove keys by setting them to an empty string. 
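+//
+// A small sketch (the metadata key is only an example):
+//
+//	// set a piece of account metadata
+//	err := c.AccountUpdate(Headers{"X-Account-Meta-Book": "MobyDick"})
+//	// later, remove it again by setting it to the empty string
+//	err = c.AccountUpdate(Headers{"X-Account-Meta-Book": ""})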
+func (c *Connection) AccountUpdate(h Headers) error { + _, _, err := c.storage(RequestOpts{ + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerCreate creates a container. +// +// If you don't want to add Headers just pass in nil +// +// No error is returned if it already exists but the metadata if any will be updated. +func (c *Connection) ContainerCreate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "PUT", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ContainerDelete deletes a container. +// +// May return ContainerDoesNotExist or ContainerNotEmpty +func (c *Connection) ContainerDelete(container string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "DELETE", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + return err +} + +// Container returns info about a single container including any +// metadata in the headers. +func (c *Connection) Container(container string) (info Container, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + Operation: "HEAD", + ErrorMap: ContainerErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + info.Name = container + if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil { + return + } + if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil { + return + } + return +} + +// ContainerUpdate adds, replaces or removes container metadata. +// +// Add or update keys by mentioning them in the Metadata. +// +// Remove keys by setting them to an empty string. +// +// Container metadata can only be read with Container() not with Containers(). +func (c *Connection) ContainerUpdate(container string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "POST", + ErrorMap: ContainerErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ------------------------------------------------------------ + +// ObjectCreateFile represents a swift object open for writing +type ObjectCreateFile struct { + checkHash bool // whether we are checking the hash + pipeReader *io.PipeReader // pipe for the caller to use + pipeWriter *io.PipeWriter + hash hash.Hash // hash being build up as we go along + done chan struct{} // signals when the upload has finished + resp *http.Response // valid when done has signalled + err error // ditto + headers Headers // ditto +} + +// Write bytes to the object - see io.Writer +func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { + n, err = file.pipeWriter.Write(p) + if err == io.ErrClosedPipe { + if file.err != nil { + return 0, file.err + } + return 0, newError(500, "Write on closed file") + } + if err == nil && file.checkHash { + _, _ = file.hash.Write(p) + } + return +} + +// Close the object and checks the md5sum if it was required. +// +// Also returns any other errors from the server (eg container not +// found) so it is very important to check the errors on this method. 
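+//
+// A hedged sketch of the usual create/write/close cycle (the container,
+// object name and contents are illustrative):
+//
+//	file, err := c.ObjectCreate("mycontainer", "hello.txt", true, "", "text/plain", nil)
+//	if err != nil {
+//		return err
+//	}
+//	_, _ = file.Write([]byte("hello world"))
+//	if err := file.Close(); err != nil {
+//		return err // may be ObjectCorrupted if the md5 check failed
+//	}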
+func (file *ObjectCreateFile) Close() error { + // Close the body + err := file.pipeWriter.Close() + if err != nil { + return err + } + + // Wait for the HTTP operation to complete + <-file.done + + // Check errors + if file.err != nil { + return file.err + } + if file.checkHash { + receivedMd5 := strings.ToLower(file.headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + return ObjectCorrupted + } + } + return nil +} + +// Check it satisfies the interface +var _ io.WriteCloser = &ObjectCreateFile{} + +// objectPutHeaders create a set of headers for a PUT +// +// It guesses the contentType from the objectName if it isn't set +// +// checkHash may be changed +func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers { + if contentType == "" { + contentType = mime.TypeByExtension(path.Ext(objectName)) + if contentType == "" { + contentType = "application/octet-stream" + } + } + // Meta stuff + extraHeaders := map[string]string{ + "Content-Type": contentType, + } + for key, value := range h { + extraHeaders[key] = value + } + if Hash != "" { + extraHeaders["Etag"] = Hash + *checkHash = false // the server will do it + } + return extraHeaders +} + +// ObjectCreate creates or updates the object in the container. It +// returns an io.WriteCloser you should write the contents to. You +// MUST call Close() on it and you MUST check the error return from +// Close(). +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted on Close() +// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted on Close() if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + pipeReader, pipeWriter := io.Pipe() + file = &ObjectCreateFile{ + hash: md5.New(), + checkHash: checkHash, + pipeReader: pipeReader, + pipeWriter: pipeWriter, + done: make(chan struct{}), + } + // Run the PUT in the background piping it data + go func() { + file.resp, file.headers, file.err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: pipeReader, + NoResponse: true, + ErrorMap: objectErrorMap, + }) + // Signal finished + pipeReader.Close() + close(file.done) + }() + return +} + +// ObjectPut creates or updates the path in the container from +// contents. contents should be an open io.Reader which will have all +// its contents read. +// +// This is a low level interface. +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted. 
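+//
+// A minimal sketch (container, object name and content are illustrative):
+//
+//	headers, err := c.ObjectPut("mycontainer", "hello.txt",
+//		strings.NewReader("hello world"), true, "", "text/plain", nil)
+//	// on success headers["Etag"] holds the md5 the server calculated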
+// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + hash := md5.New() + var body io.Reader = contents + if checkHash { + body = io.TeeReader(contents, hash) + } + _, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: body, + NoResponse: true, + ErrorMap: objectErrorMap, + }) + if err != nil { + return + } + if checkHash { + receivedMd5 := strings.ToLower(headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + return +} + +// ObjectPutBytes creates an object from a []byte in a container. +// +// This is a simplified interface which checks the MD5. +func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { + buf := bytes.NewBuffer(contents) + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + return +} + +// ObjectPutString creates an object from a string in a container. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { + buf := strings.NewReader(contents) + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + return +} + +// ObjectOpenFile represents a swift object open for reading +type ObjectOpenFile struct { + connection *Connection // stored copy of Connection used in Open + container string // stored copy of container used in Open + objectName string // stored copy of objectName used in Open + headers Headers // stored copy of headers used in Open + resp *http.Response // http connection + body io.Reader // read data from this + checkHash bool // true if checking MD5 + hash hash.Hash // currently accumulating MD5 + bytes int64 // number of bytes read on this connection + eof bool // whether we have read end of file + pos int64 // current position when reading + lengthOk bool // whether length is valid + length int64 // length of the object if read + seeked bool // whether we have seeked this file or not +} + +// Read bytes from the object - see io.Reader +func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { + n, err = file.body.Read(p) + file.bytes += int64(n) + file.pos += int64(n) + if err == io.EOF { + file.eof = true + } + return +} + +// Seek sets the offset for the next Read to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 +// means relative to the current offset, and 2 means relative to the +// end. Seek returns the new offset and an Error, if any. +// +// Seek uses HTTP Range headers which, if the file pointer is moved, +// will involve reopening the HTTP connection. 
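+//
+// For example, to skip a fixed size header and then read back the current
+// position (purely illustrative):
+//
+//	_, err := file.Seek(512, 0) // absolute seek
+//	pos, _ := file.Seek(0, 1)   // returns the current offset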
+// +// Note that you can't seek to the end of a file or beyond; HTTP Range +// requests don't support the file pointer being outside the data, +// unlike os.File +// +// Seek(0, 1) will return the current file pointer. +func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { + switch whence { + case 0: // relative to start + newPos = offset + case 1: // relative to current + newPos = file.pos + offset + case 2: // relative to end + if !file.lengthOk { + return file.pos, newError(0, "Length of file unknown so can't seek from end") + } + newPos = file.length + offset + default: + panic("Unknown whence in ObjectOpenFile.Seek") + } + // If at correct position (quite likely), do nothing + if newPos == file.pos { + return + } + // Close the file... + file.seeked = true + err = file.Close() + if err != nil { + return + } + // ...and re-open with a Range header + if file.headers == nil { + file.headers = Headers{} + } + if newPos > 0 { + file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) + } else { + delete(file.headers, "Range") + } + newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) + if err != nil { + return + } + // Update the file + file.resp = newFile.resp + file.body = newFile.body + file.checkHash = false + file.pos = newPos + return +} + +// Length gets the objects content length either from a cached copy or +// from the server. +func (file *ObjectOpenFile) Length() (int64, error) { + if !file.lengthOk { + info, _, err := file.connection.Object(file.container, file.objectName) + file.length = info.Bytes + file.lengthOk = (err == nil) + return file.length, err + } + return file.length, nil +} + +// Close the object and checks the length and md5sum if it was +// required and all the object was read +func (file *ObjectOpenFile) Close() (err error) { + // Close the body at the end + defer checkClose(file.resp.Body, &err) + + // If not end of file or seeked then can't check anything + if !file.eof || file.seeked { + return + } + + // Check the MD5 sum if requested + if file.checkHash { + receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + + // Check to see we read the correct number of bytes + if file.lengthOk && file.length != file.bytes { + err = ObjectCorrupted + return + } + return +} + +// Check it satisfies the interfaces +var _ io.ReadCloser = &ObjectOpenFile{} +var _ io.Seeker = &ObjectOpenFile{} + +// ObjectOpen returns an ObjectOpenFile for reading the contents of +// the object. This satisfies the io.ReadCloser and the io.Seeker +// interfaces. +// +// You must call Close() on contents when finished +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. It +// will also check the length returned. No checking will be done if +// you don't read all the contents. +// +// Note that objects with X-Object-Manifest set won't ever have their +// md5sum's checked as the md5sum reported on the object is actually +// the md5sum of the md5sums of the parts. This isn't very helpful to +// detect a corrupted download as the size of the parts aren't known +// without doing more operations. 
If you want to ensure integrity of +// an object with a manifest then you will need to download everything +// in the manifest separately. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "GET", + ErrorMap: objectErrorMap, + Headers: h, + }) + if err != nil { + return + } + // Can't check MD5 on an object with X-Object-Manifest set + if checkHash && headers["X-Object-Manifest"] != "" { + // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) + checkHash = false + } + file = &ObjectOpenFile{ + connection: c, + container: container, + objectName: objectName, + headers: h, + resp: resp, + checkHash: checkHash, + body: resp.Body, + } + if checkHash { + file.hash = md5.New() + file.body = io.TeeReader(resp.Body, file.hash) + } + // Read Content-Length + file.length, err = getInt64FromHeader(resp, "Content-Length") + file.lengthOk = (err == nil) + return +} + +// ObjectGet gets the object into the io.Writer contents. +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { + file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) + if err != nil { + return + } + defer checkClose(file, &err) + _, err = io.Copy(contents, file) + return +} + +// ObjectGetBytes returns an object as a []byte. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.Bytes() + return +} + +// ObjectGetString returns an object as a string. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.String() + return +} + +// ObjectDelete deletes the object. +// +// May return ObjectNotFound if the object isn't found +func (c *Connection) ObjectDelete(container string, objectName string) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "DELETE", + ErrorMap: objectErrorMap, + }) + return err +} + +// parseResponseStatus parses string like "200 OK" and returns Error. +// +// For status codes beween 200 and 299, this returns nil. +func parseResponseStatus(resp string, errorMap errorMap) error { + code := 0 + reason := resp + t := strings.SplitN(resp, " ", 2) + if len(t) == 2 { + ncode, err := strconv.Atoi(t[0]) + if err == nil { + code = ncode + reason = t[1] + } + } + if errorMap != nil { + if err, ok := errorMap[code]; ok { + return err + } + } + if 200 <= code && code <= 299 { + return nil + } + return newError(code, reason) +} + +// BulkDeleteResult stores results of BulkDelete(). 
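+//
+// It is returned by BulkDelete, roughly like this (names illustrative):
+//
+//	result, err := c.BulkDelete("mycontainer", []string{"a.txt", "b.txt"})
+//	if err == nil {
+//		fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
+//	}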
+// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where the object was +// to be deleted, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkDeleteResult struct { + NumberNotFound int64 // # of objects not found. + NumberDeleted int64 // # of deleted objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkDelete deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + var buffer bytes.Buffer + for _, s := range objectNames { + buffer.WriteString(fmt.Sprintf("/%s/%s\n", container, + url.QueryEscape(s))) + } + resp, headers, err := c.storage(RequestOpts{ + Operation: "DELETE", + Parameters: url.Values{"bulk-delete": []string{"1"}}, + Headers: Headers{ + "Accept": "application/json", + "Content-Type": "text/plain", + }, + ErrorMap: ContainerErrorMap, + Body: &buffer, + }) + if err != nil { + return + } + var jsonResult struct { + NotFound int64 `json:"Number Not Found"` + Status string `json:"Response Status"` + Errors [][]string + Deleted int64 `json:"Number Deleted"` + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberNotFound = jsonResult.NotFound + result.NumberDeleted = jsonResult.Deleted + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// BulkUploadResult stores results of BulkUpload(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where an object was +// to be created, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkUploadResult struct { + NumberCreated int64 // # of created objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkUpload uploads multiple files in one operation. +// +// uploadPath can be empty, a container name, or a pseudo-directory +// within a container. If uploadPath is empty, new containers may be +// automatically created. +// +// Files are read from dataStream. The format of the stream is specified +// by the format parameter. Available formats are: +// * UploadTar - Plain tar stream. +// * UploadTarGzip - Gzip compressed tar stream. +// * UploadTarBzip2 - Bzip2 compressed tar stream. +// +// Some servers may not accept bulk-upload requests since bulk-upload is +// an optional feature of swift - these will return the Forbidden error. 
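+//
+// A hedged sketch of uploading a plain tar stream (the file name and
+// upload path are made up):
+//
+//	archive, _ := os.Open("objects.tar") // error handling elided
+//	result, err := c.BulkUpload("mycontainer", archive, UploadTar, nil)
+//	// inspect result.NumberCreated and result.Errors as well as err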
+// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html +func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { + extraHeaders := Headers{"Accept": "application/json"} + for key, value := range h { + extraHeaders[key] = value + } + // The following code abuses Container parameter intentionally. + // The best fix might be to rename Container to UploadPath. + resp, headers, err := c.storage(RequestOpts{ + Container: uploadPath, + Operation: "PUT", + Parameters: url.Values{"extract-archive": []string{format}}, + Headers: extraHeaders, + ErrorMap: ContainerErrorMap, + Body: dataStream, + }) + if err != nil { + return + } + // Detect old servers which don't support this feature + if headers["Content-Type"] != "application/json" { + err = Forbidden + return + } + var jsonResult struct { + Created int64 `json:"Number Files Created"` + Status string `json:"Response Status"` + Errors [][]string + } + err = readJson(resp, &jsonResult) + if err != nil { + return + } + + err = parseResponseStatus(jsonResult.Status, objectErrorMap) + result.NumberCreated = jsonResult.Created + result.Headers = headers + el := make(map[string]error, len(jsonResult.Errors)) + for _, t := range jsonResult.Errors { + if len(t) != 2 { + continue + } + el[t[0]] = parseResponseStatus(t[1], objectErrorMap) + } + result.Errors = el + return +} + +// Object returns info about a single object including any metadata in the header. +// +// May return ObjectNotFound. +// +// Use headers.ObjectMetadata() to read the metadata in the Headers. +func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) { + var resp *http.Response + resp, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "HEAD", + ErrorMap: objectErrorMap, + NoResponse: true, + }) + if err != nil { + return + } + // Parse the headers into the struct + // HTTP/1.1 200 OK + // Date: Thu, 07 Jun 2010 20:59:39 GMT + // Server: Apache + // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT + // ETag: 8a964ee2a5e88be344f36c22562a6486 + // Content-Length: 512000 + // Content-Type: text/plain; charset=UTF-8 + // X-Object-Meta-Meat: Bacon + // X-Object-Meta-Fruit: Bacon + // X-Object-Meta-Veggie: Bacon + // X-Object-Meta-Dairy: Bacon + info.Name = objectName + info.ContentType = resp.Header.Get("Content-Type") + if resp.Header.Get("Content-Length") != "" { + if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil { + return + } + } + info.ServerLastModified = resp.Header.Get("Last-Modified") + if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil { + return + } + info.Hash = resp.Header.Get("Etag") + return +} + +// ObjectUpdate adds, replaces or removes object metadata. +// +// Add or Update keys by mentioning them in the Metadata. Use +// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your +// Metadata to and from normal HTTP headers. +// +// This removes all metadata previously added to the object and +// replaces it with that passed in so to delete keys, just don't +// mention them the headers you pass in. +// +// Object metadata can only be read with Object() not with Objects(). 
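+//
+// A small sketch which replaces the object's metadata (names illustrative):
+//
+//	err := c.ObjectUpdate("mycontainer", "hello.txt", Headers{
+//		"X-Object-Meta-Colour": "blue",
+//	})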
+// +// This can also be used to set headers not already assigned such as +// X-Delete-At or X-Delete-After for expiring objects. +// +// You cannot use this to change any of the object's other headers +// such as Content-Type, ETag, etc. +// +// Refer to copying an object when you need to update metadata or +// other headers such as Content-Type or CORS headers. +// +// May return ObjectNotFound. +func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error { + _, _, err := c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "POST", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: h, + }) + return err +} + +// ObjectCopy does a server side copy of an object to a new position +// +// All metadata is preserved. If metadata is set in the headers then +// it overrides the old metadata on the copied object. +// +// The destination container must exist before the copy. +// +// You can use this to copy an object to itself - this is the only way +// to update the content type of an object. +func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { + // Meta stuff + extraHeaders := map[string]string{ + "Destination": dstContainer + "/" + dstObjectName, + } + for key, value := range h { + extraHeaders[key] = value + } + _, headers, err = c.storage(RequestOpts{ + Container: srcContainer, + ObjectName: srcObjectName, + Operation: "COPY", + ErrorMap: objectErrorMap, + NoResponse: true, + Headers: extraHeaders, + }) + return +} + +// ObjectMove does a server side move of an object to a new position +// +// This is a convenience method which calls ObjectCopy then ObjectDelete +// +// All metadata is preserved. +// +// The destination container must exist before the copy. +func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) { + _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil) + if err != nil { + return + } + return c.ObjectDelete(srcContainer, srcObjectName) +} + +// ObjectUpdateContentType updates the content type of an object +// +// This is a convenience method which calls ObjectCopy +// +// All other metadata is preserved. +func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) { + h := Headers{"Content-Type": contentType} + _, err = c.ObjectCopy(container, objectName, container, objectName, h) + return +} + +// ------------------------------------------------------------ + +// VersionContainerCreate is a helper method for creating and enabling version controlled containers. +// +// It builds the current object container, the non-current object version container, and enables versioning. +// +// If the server doesn't support versioning then it will return +// Forbidden however it will have created both the containers at that point. +func (c *Connection) VersionContainerCreate(current, version string) error { + if err := c.ContainerCreate(version, nil); err != nil { + return err + } + if err := c.ContainerCreate(current, nil); err != nil { + return err + } + if err := c.VersionEnable(current, version); err != nil { + return err + } + return nil +} + +// VersionEnable enables versioning on the current container with version as the tracking container. 
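+//
+// For example (the container names are illustrative):
+//
+//	err := c.VersionEnable("current", "versions")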
+// +// May return Forbidden if this isn't supported by the server +func (c *Connection) VersionEnable(current, version string) error { + h := Headers{"X-Versions-Location": version} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + // Check to see if the header was set properly + _, headers, err := c.Container(current) + if err != nil { + return err + } + // If failed to set versions header, return Forbidden as the server doesn't support this + if headers["X-Versions-Location"] != version { + return Forbidden + } + return nil +} + +// VersionDisable disables versioning on the current container. +func (c *Connection) VersionDisable(current string) error { + h := Headers{"X-Versions-Location": ""} + if err := c.ContainerUpdate(current, h); err != nil { + return err + } + return nil +} + +// VersionObjectList returns a list of older versions of the object. +// +// Objects are returned in the format / +func (c *Connection) VersionObjectList(version, object string) ([]string, error) { + opts := &ObjectsOpts{ + // <3-character zero-padded hexadecimal character length>/ + Prefix: fmt.Sprintf("%03x", len(object)) + object + "/", + } + return c.ObjectNames(version, opts) +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go b/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go new file mode 100644 index 000000000..e8b1f4378 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/swift_internal_test.go @@ -0,0 +1,409 @@ +// This tests the swift package internals +// +// It does not require access to a swift server +// +// FIXME need to add more tests and to check URLs and parameters +package swift + +import ( + "fmt" + "io" + "net" + "net/http" + "testing" + + // "net/http/httputil" + // "os" +) + +const ( + TEST_ADDRESS = "localhost:5324" + AUTH_URL = "http://" + TEST_ADDRESS + "/v1.0" + PROXY_URL = "http://" + TEST_ADDRESS + "/proxy" + USERNAME = "test" + APIKEY = "apikey" + AUTH_TOKEN = "token" +) + +// Globals +var ( + server *SwiftServer + c *Connection +) + +// SwiftServer implements a test swift server +type SwiftServer struct { + t *testing.T + checks []*Check +} + +// Used to check and reply to http transactions +type Check struct { + in Headers + out Headers + rx *string + tx *string + err *Error + url *string +} + +// Add a in check +func (check *Check) In(in Headers) *Check { + check.in = in + return check +} + +// Add an out check +func (check *Check) Out(out Headers) *Check { + check.out = out + return check +} + +// Add an Error check +func (check *Check) Error(StatusCode int, Text string) *Check { + check.err = newError(StatusCode, Text) + return check +} + +// Add a rx check +func (check *Check) Rx(rx string) *Check { + check.rx = &rx + return check +} + +// Add an tx check +func (check *Check) Tx(tx string) *Check { + check.tx = &tx + return check +} + +// Add an URL check +func (check *Check) Url(url string) *Check { + check.url = &url + return check +} + +// Add a check +func (s *SwiftServer) AddCheck(t *testing.T) *Check { + server.t = t + check := &Check{ + in: Headers{}, + out: Headers{}, + err: nil, + } + s.checks = append(s.checks, check) + return check +} + +// Responds to a request +func (s *SwiftServer) Respond(w http.ResponseWriter, r *http.Request) { + if len(s.checks) < 1 { + s.t.Fatal("Unexpected http transaction") + } + check := s.checks[0] + s.checks = s.checks[1:] + + // Check URL + if check.url != nil && *check.url != r.URL.String() { + s.t.Errorf("Expecting URL %q but got %q", *check.url, 
r.URL) + } + + // Check headers + for k, v := range check.in { + actual := r.Header.Get(k) + if actual != v { + s.t.Errorf("Expecting header %q=%q but got %q", k, v, actual) + } + } + // Write output headers + h := w.Header() + for k, v := range check.out { + h.Set(k, v) + } + // Return an error if required + if check.err != nil { + http.Error(w, check.err.Text, check.err.StatusCode) + } else { + if check.tx != nil { + _, err := w.Write([]byte(*check.tx)) + if err != nil { + s.t.Error("Write failed", err) + } + } + } +} + +// Checks to see all responses are used up +func (s *SwiftServer) Finished() { + if len(s.checks) > 0 { + s.t.Error("Unused checks", s.checks) + } +} + +func handle(w http.ResponseWriter, r *http.Request) { + // out, _ := httputil.DumpRequest(r, true) + // os.Stdout.Write(out) + server.Respond(w, r) +} + +func NewSwiftServer() *SwiftServer { + server := &SwiftServer{} + http.HandleFunc("/", handle) + go http.ListenAndServe(TEST_ADDRESS, nil) + fmt.Print("Waiting for server to start ") + for { + fmt.Print(".") + conn, err := net.Dial("tcp", TEST_ADDRESS) + if err == nil { + conn.Close() + fmt.Println(" Started") + break + } + } + return server +} + +func init() { + server = NewSwiftServer() + c = &Connection{ + UserName: USERNAME, + ApiKey: APIKEY, + AuthUrl: AUTH_URL, + } +} + +// Check the error is a swift error +func checkError(t *testing.T, err error, StatusCode int, Text string) { + if err == nil { + t.Fatal("No error returned") + } + err2, ok := err.(*Error) + if !ok { + t.Fatal("Bad error type") + } + if err2.StatusCode != StatusCode { + t.Fatalf("Bad status code, expecting %d got %d", StatusCode, err2.StatusCode) + } + if err2.Text != Text { + t.Fatalf("Bad error string, expecting %q got %q", Text, err2.Text) + } +} + +// FIXME copied from swift_test.go +func compareMaps(t *testing.T, a, b map[string]string) { + if len(a) != len(b) { + t.Error("Maps different sizes", a, b) + } + for ka, va := range a { + if vb, ok := b[ka]; !ok || va != vb { + t.Error("Difference in key", ka, va, b[ka]) + } + } + for kb, vb := range b { + if va, ok := a[kb]; !ok || vb != va { + t.Error("Difference in key", kb, vb, a[kb]) + } + } +} + +func TestInternalError(t *testing.T) { + e := newError(404, "Not Found!") + if e.StatusCode != 404 || e.Text != "Not Found!" { + t.Fatal("Bad error") + } + if e.Error() != "Not Found!" 
{ + t.Fatal("Bad error") + } + +} + +func testCheckClose(c io.Closer, e error) (err error) { + err = e + defer checkClose(c, &err) + return +} + +// Make a closer which returns the error of our choice +type myCloser struct { + err error +} + +func (c *myCloser) Close() error { + return c.err +} + +func TestInternalCheckClose(t *testing.T) { + if testCheckClose(&myCloser{nil}, nil) != nil { + t.Fatal("bad 1") + } + if testCheckClose(&myCloser{nil}, ObjectCorrupted) != ObjectCorrupted { + t.Fatal("bad 2") + } + if testCheckClose(&myCloser{ObjectNotFound}, nil) != ObjectNotFound { + t.Fatal("bad 3") + } + if testCheckClose(&myCloser{ObjectNotFound}, ObjectCorrupted) != ObjectCorrupted { + t.Fatal("bad 4") + } +} + +func TestInternalParseHeaders(t *testing.T) { + resp := &http.Response{StatusCode: 200} + if c.parseHeaders(resp, nil) != nil { + t.Error("Bad 1") + } + if c.parseHeaders(resp, authErrorMap) != nil { + t.Error("Bad 1") + } + + resp = &http.Response{StatusCode: 299} + if c.parseHeaders(resp, nil) != nil { + t.Error("Bad 1") + } + + resp = &http.Response{StatusCode: 199, Status: "BOOM"} + checkError(t, c.parseHeaders(resp, nil), 199, "HTTP Error: 199: BOOM") + + resp = &http.Response{StatusCode: 300, Status: "BOOM"} + checkError(t, c.parseHeaders(resp, nil), 300, "HTTP Error: 300: BOOM") + + resp = &http.Response{StatusCode: 404, Status: "BOOM"} + checkError(t, c.parseHeaders(resp, nil), 404, "HTTP Error: 404: BOOM") + if c.parseHeaders(resp, ContainerErrorMap) != ContainerNotFound { + t.Error("Bad 1") + } + if c.parseHeaders(resp, objectErrorMap) != ObjectNotFound { + t.Error("Bad 1") + } +} + +func TestInternalReadHeaders(t *testing.T) { + resp := &http.Response{Header: http.Header{}} + compareMaps(t, readHeaders(resp), Headers{}) + + resp = &http.Response{Header: http.Header{ + "one": []string{"1"}, + "two": []string{"2"}, + }} + compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) + + // FIXME this outputs a log which we should test and check + resp = &http.Response{Header: http.Header{ + "one": []string{"1", "11", "111"}, + "two": []string{"2"}, + }} + compareMaps(t, readHeaders(resp), Headers{"one": "1", "two": "2"}) +} + +func TestInternalStorage(t *testing.T) { + // FIXME +} + +// ------------------------------------------------------------ + +func TestInternalAuthenticate(t *testing.T) { + server.AddCheck(t).In(Headers{ + "User-Agent": DefaultUserAgent, + "X-Auth-Key": APIKEY, + "X-Auth-User": USERNAME, + }).Out(Headers{ + "X-Storage-Url": PROXY_URL, + "X-Auth-Token": AUTH_TOKEN, + }).Url("/v1.0") + defer server.Finished() + + err := c.Authenticate() + if err != nil { + t.Fatal(err) + } + if c.StorageUrl != PROXY_URL { + t.Error("Bad storage url") + } + if c.AuthToken != AUTH_TOKEN { + t.Error("Bad auth token") + } + if !c.Authenticated() { + t.Error("Didn't authenticate") + } +} + +func TestInternalAuthenticateDenied(t *testing.T) { + server.AddCheck(t).Error(400, "Bad request") + server.AddCheck(t).Error(401, "DENIED") + defer server.Finished() + c.UnAuthenticate() + err := c.Authenticate() + if err != AuthorizationFailed { + t.Fatal("Expecting AuthorizationFailed", err) + } + // FIXME + // if c.Authenticated() { + // t.Fatal("Expecting not authenticated") + // } +} + +func TestInternalAuthenticateBad(t *testing.T) { + server.AddCheck(t).Out(Headers{ + "X-Storage-Url": PROXY_URL, + }) + defer server.Finished() + err := c.Authenticate() + checkError(t, err, 0, "Response didn't have storage url and auth token") + if c.Authenticated() { + t.Fatal("Expecting not 
authenticated") + } + + server.AddCheck(t).Out(Headers{ + "X-Auth-Token": AUTH_TOKEN, + }) + err = c.Authenticate() + checkError(t, err, 0, "Response didn't have storage url and auth token") + if c.Authenticated() { + t.Fatal("Expecting not authenticated") + } + + server.AddCheck(t) + err = c.Authenticate() + checkError(t, err, 0, "Response didn't have storage url and auth token") + if c.Authenticated() { + t.Fatal("Expecting not authenticated") + } + + server.AddCheck(t).Out(Headers{ + "X-Storage-Url": PROXY_URL, + "X-Auth-Token": AUTH_TOKEN, + }) + err = c.Authenticate() + if err != nil { + t.Fatal(err) + } + if !c.Authenticated() { + t.Fatal("Expecting authenticated") + } +} + +func testContainerNames(t *testing.T, rx string, expected []string) { + server.AddCheck(t).In(Headers{ + "User-Agent": DefaultUserAgent, + "X-Auth-Token": AUTH_TOKEN, + }).Tx(rx).Url("/proxy") + containers, err := c.ContainerNames(nil) + if err != nil { + t.Fatal(err) + } + if len(containers) != len(expected) { + t.Fatal("Wrong number of containers", len(containers), rx, len(expected), expected) + } + for i := range containers { + if containers[i] != expected[i] { + t.Error("Bad container", containers[i], expected[i]) + } + } +} +func TestInternalContainerNames(t *testing.T) { + defer server.Finished() + testContainerNames(t, "", []string{}) + testContainerNames(t, "one", []string{"one"}) + testContainerNames(t, "one\n", []string{"one"}) + testContainerNames(t, "one\ntwo\nthree\n", []string{"one", "two", "three"}) +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go b/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go new file mode 100644 index 000000000..57f7d9e86 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/swift_test.go @@ -0,0 +1,1472 @@ +// This tests the swift packagae +// +// It can be used with a real swift server which should be set up in +// the environment variables SWIFT_API_USER, SWIFT_API_KEY and +// SWIFT_AUTH_URL +// In case those variables are not defined, a fake Swift server +// is used instead - see Testing in README.md for more info +// +// The functions are designed to run in order and create things the +// next function tests. This means that if it goes wrong it is likely +// errors will propagate. You may need to tidy up the CONTAINER to +// get it to run cleanly. 
+package swift_test + +import ( + "archive/tar" + "bytes" + "crypto/md5" + "crypto/tls" + "encoding/json" + "encoding/xml" + "fmt" + "github.com/ncw/swift" + "github.com/ncw/swift/swifttest" + "io" + "net/http" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +var ( + c *swift.Connection + srv *swifttest.SwiftServer + m1 = swift.Metadata{"Hello": "1", "potato-Salad": "2"} + m2 = swift.Metadata{"hello": "", "potato-salad": ""} + skipVersionTests = false +) + +const ( + CONTAINER = "GoSwiftUnitTest" + VERSIONS_CONTAINER = "GoSwiftUnitTestVersions" + CURRENT_CONTAINER = "GoSwiftUnitTestCurrent" + OBJECT = "test_object" + OBJECT2 = "test_object2" + EMPTYOBJECT = "empty_test_object" + CONTENTS = "12345" + CONTENTS2 = "54321" + CONTENT_SIZE = int64(len(CONTENTS)) + CONTENT_MD5 = "827ccb0eea8a706c4c34a16891f84e7b" + EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e" +) + +type someTransport struct{ http.Transport } + +func makeConnection() (*swift.Connection, error) { + var err error + + UserName := os.Getenv("SWIFT_API_USER") + ApiKey := os.Getenv("SWIFT_API_KEY") + AuthUrl := os.Getenv("SWIFT_AUTH_URL") + + Insecure := os.Getenv("SWIFT_AUTH_INSECURE") + ConnectionChannelTimeout := os.Getenv("SWIFT_CONNECTION_CHANNEL_TIMEOUT") + DataChannelTimeout := os.Getenv("SWIFT_DATA_CHANNEL_TIMEOUT") + + if UserName == "" || ApiKey == "" || AuthUrl == "" { + if srv != nil { + srv.Close() + } + srv, err = swifttest.NewSwiftServer("localhost") + if err != nil { + return nil, err + } + + UserName = "swifttest" + ApiKey = "swifttest" + AuthUrl = srv.AuthURL + } + + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + } + if Insecure == "1" { + transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } + + c := swift.Connection{ + UserName: UserName, + ApiKey: ApiKey, + AuthUrl: AuthUrl, + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 60 * time.Second, + } + + var timeout int64 + if ConnectionChannelTimeout != "" { + timeout, err = strconv.ParseInt(ConnectionChannelTimeout, 10, 32) + if err == nil { + c.ConnectTimeout = time.Duration(timeout) * time.Second + } + } + + if DataChannelTimeout != "" { + timeout, err = strconv.ParseInt(DataChannelTimeout, 10, 32) + if err == nil { + c.Timeout = time.Duration(timeout) * time.Second + } + } + + return &c, nil +} + +func isV3Api() bool { + AuthUrl := os.Getenv("SWIFT_AUTH_URL") + return strings.Contains(AuthUrl, "v3") +} + +func TestTransport(t *testing.T) { + var err error + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + tr := &someTransport{ + Transport: http.Transport{ + MaxIdleConnsPerHost: 2048, + }, + } + + Insecure := os.Getenv("SWIFT_AUTH_INSECURE") + + if Insecure == "1" { + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } + + c.Transport = tr + + if isV3Api() { + c.Tenant = os.Getenv("SWIFT_TENANT") + c.Domain = os.Getenv("SWIFT_API_DOMAIN") + } else { + c.Tenant = os.Getenv("SWIFT_TENANT") + c.TenantId = os.Getenv("SWIFT_TENANT_ID") + } + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } + if srv != nil { + srv.Close() + } +} + +// The following Test functions are run in order - this one must come before the others! 
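The authentication tests that follow differ only in which fields of swift.Connection they populate. As a rough summary, with placeholder endpoints and credentials rather than values taken from the suite: v1/v2 auth needs at most a tenant, while Identity v3 additionally takes a domain.

```go
package main

import "github.com/ncw/swift"

func main() {
	// v1/v2 style auth: user, key and (optionally) a tenant name or id.
	v2 := swift.Connection{
		UserName: "user",
		ApiKey:   "key",
		AuthUrl:  "https://storage.example.com/v2.0", // placeholder endpoint
		Tenant:   "tenantname",                       // or TenantId
	}

	// Identity v3 style auth: additionally a domain name or id.
	v3 := swift.Connection{
		UserName: "user",
		ApiKey:   "key",
		AuthUrl:  "https://storage.example.com/v3/auth", // placeholder endpoint
		Tenant:   "tenantname",                          // or TenantId
		Domain:   "domainname",                          // or DomainId
	}

	// Fields are shown for illustration only; call Authenticate() on a
	// connection to actually log in.
	_, _ = v2, v3
}
```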
+func TestV1V2Authenticate(t *testing.T) { + var err error + + if isV3Api() { + return + } + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + c.Tenant = os.Getenv("SWIFT_TENANT") + c.TenantId = os.Getenv("SWIFT_TENANT_ID") + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +func TestV3AuthenticateWithDomainNameAndTenantId(t *testing.T) { + var err error + if !isV3Api() { + return + } + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + c.TenantId = os.Getenv("SWIFT_TENANT_ID") + c.Domain = os.Getenv("SWIFT_API_DOMAIN") + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +func TestV3AuthenticateWithDomainIdAndTenantId(t *testing.T) { + var err error + + if !isV3Api() { + return + } + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + c.TenantId = os.Getenv("SWIFT_TENANT_ID") + c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID") + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +func TestV3AuthenticateWithDomainNameAndTenantName(t *testing.T) { + var err error + + if !isV3Api() { + return + } + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + c.Tenant = os.Getenv("SWIFT_TENANT") + c.Domain = os.Getenv("SWIFT_API_DOMAIN") + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +func TestV3AuthenticateWithDomainIdAndTenantName(t *testing.T) { + var err error + + if !isV3Api() { + return + } + + c, err = makeConnection() + if err != nil { + t.Fatal("Failed to create server", err) + } + + c.Tenant = os.Getenv("SWIFT_TENANT") + c.DomainId = os.Getenv("SWIFT_API_DOMAIN_ID") + + err = c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} + +// Attempt to trigger a race in authenticate +// +// Run with -race to test +func TestAuthenticateRace(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + err := c.Authenticate() + if err != nil { + t.Fatal("Auth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } + }() + } + wg.Wait() +} + +// Test a connection can be serialized and unserialized with JSON +func TestSerializeConnectionJson(t *testing.T) { + serializedConnection, err := json.Marshal(c) + if err != nil { + t.Fatalf("Failed to serialize connection: %v", err) + } + c2 := new(swift.Connection) + err = json.Unmarshal(serializedConnection, &c2) + if err != nil { + t.Fatalf("Failed to unserialize connection: %v", err) + } + if !c2.Authenticated() { + t.Fatal("Should be authenticated") + } + _, _, err = c2.Account() + if err != nil { + t.Fatalf("Failed to use unserialized connection: %v", err) + } +} + +// Test a connection can be serialized and unserialized with XML +func TestSerializeConnectionXml(t *testing.T) { + serializedConnection, err := xml.Marshal(c) + if err != nil { + t.Fatalf("Failed to serialize connection: %v", err) + } + c2 := new(swift.Connection) + err = xml.Unmarshal(serializedConnection, &c2) + if err != nil { + t.Fatalf("Failed to unserialize connection: %v", err) + } + if 
!c2.Authenticated() { + t.Fatal("Should be authenticated") + } + _, _, err = c2.Account() + if err != nil { + t.Fatalf("Failed to use unserialized connection: %v", err) + } +} + +// Test the reauthentication logic +func TestOnReAuth(t *testing.T) { + c2 := c + c2.UnAuthenticate() + _, _, err := c2.Account() + if err != nil { + t.Fatalf("Failed to reauthenticate: %v", err) + } +} +func TestAccount(t *testing.T) { + info, headers, err := c.Account() + if err != nil { + t.Fatal(err) + } + if headers["X-Account-Container-Count"] != fmt.Sprintf("%d", info.Containers) { + t.Error("Bad container count") + } + if headers["X-Account-Bytes-Used"] != fmt.Sprintf("%d", info.BytesUsed) { + t.Error("Bad bytes count") + } + if headers["X-Account-Object-Count"] != fmt.Sprintf("%d", info.Objects) { + t.Error("Bad objects count") + } + //fmt.Println(info) + //fmt.Println(headers) +} + +func compareMaps(t *testing.T, a, b map[string]string) { + if len(a) != len(b) { + t.Error("Maps different sizes", a, b) + } + for ka, va := range a { + if vb, ok := b[ka]; !ok || va != vb { + t.Error("Difference in key", ka, va, b[ka]) + } + } + for kb, vb := range b { + if va, ok := a[kb]; !ok || vb != va { + t.Error("Difference in key", kb, vb, a[kb]) + } + } +} + +func TestAccountUpdate(t *testing.T) { + err := c.AccountUpdate(m1.AccountHeaders()) + if err != nil { + t.Fatal(err) + } + + _, headers, err := c.Account() + if err != nil { + t.Fatal(err) + } + m := headers.AccountMetadata() + delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set + compareMaps(t, m, map[string]string{"hello": "1", "potato-salad": "2"}) + + err = c.AccountUpdate(m2.AccountHeaders()) + if err != nil { + t.Fatal(err) + } + + _, headers, err = c.Account() + if err != nil { + t.Fatal(err) + } + m = headers.AccountMetadata() + delete(m, "temp-url-key") // remove X-Account-Meta-Temp-URL-Key if set + compareMaps(t, m, map[string]string{}) + + //fmt.Println(c.Account()) + //fmt.Println(headers) + //fmt.Println(headers.AccountMetadata()) + //fmt.Println(c.AccountUpdate(m2.AccountHeaders())) + //fmt.Println(c.Account()) +} + +func TestContainerCreate(t *testing.T) { + err := c.ContainerCreate(CONTAINER, m1.ContainerHeaders()) + if err != nil { + t.Fatal(err) + } +} + +func TestContainer(t *testing.T) { + info, headers, err := c.Container(CONTAINER) + if err != nil { + t.Fatal(err) + } + compareMaps(t, headers.ContainerMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) + if CONTAINER != info.Name { + t.Error("Bad container count") + } + if headers["X-Container-Bytes-Used"] != fmt.Sprintf("%d", info.Bytes) { + t.Error("Bad bytes count") + } + if headers["X-Container-Object-Count"] != fmt.Sprintf("%d", info.Count) { + t.Error("Bad objects count") + } + //fmt.Println(info) + //fmt.Println(headers) +} + +func TestContainersAll(t *testing.T) { + containers1, err := c.ContainersAll(nil) + if err != nil { + t.Fatal(err) + } + containers2, err := c.Containers(nil) + if err != nil { + t.Fatal(err) + } + if len(containers1) != len(containers2) { + t.Fatal("Wrong length") + } + for i := range containers1 { + if containers1[i] != containers2[i] { + t.Fatal("Not the same") + } + } +} + +func TestContainersAllWithLimit(t *testing.T) { + containers1, err := c.ContainersAll(&swift.ContainersOpts{Limit: 1}) + if err != nil { + t.Fatal(err) + } + containers2, err := c.Containers(nil) + if err != nil { + t.Fatal(err) + } + if len(containers1) != len(containers2) { + t.Fatal("Wrong length") + } + for i := range containers1 { + if 
containers1[i] != containers2[i] { + t.Fatal("Not the same") + } + } +} + +func TestContainerUpdate(t *testing.T) { + err := c.ContainerUpdate(CONTAINER, m2.ContainerHeaders()) + if err != nil { + t.Fatal(err) + } + _, headers, err := c.Container(CONTAINER) + if err != nil { + t.Fatal(err) + } + compareMaps(t, headers.ContainerMetadata(), map[string]string{}) + //fmt.Println(headers) +} + +func TestContainerNames(t *testing.T) { + containers, err := c.ContainerNames(nil) + if err != nil { + t.Fatal(err) + } + // fmt.Printf("container %q\n", CONTAINER) + ok := false + for _, container := range containers { + if container == CONTAINER { + ok = true + break + } + } + if !ok { + t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) + } + // fmt.Println(containers) +} + +func TestContainerNamesAll(t *testing.T) { + containers1, err := c.ContainerNamesAll(nil) + if err != nil { + t.Fatal(err) + } + containers2, err := c.ContainerNames(nil) + if err != nil { + t.Fatal(err) + } + if len(containers1) != len(containers2) { + t.Fatal("Wrong length") + } + for i := range containers1 { + if containers1[i] != containers2[i] { + t.Fatal("Not the same") + } + } +} + +func TestContainerNamesAllWithLimit(t *testing.T) { + containers1, err := c.ContainerNamesAll(&swift.ContainersOpts{Limit: 1}) + if err != nil { + t.Fatal(err) + } + containers2, err := c.ContainerNames(nil) + if err != nil { + t.Fatal(err) + } + if len(containers1) != len(containers2) { + t.Fatal("Wrong length") + } + for i := range containers1 { + if containers1[i] != containers2[i] { + t.Fatal("Not the same") + } + } +} + +func TestObjectPutString(t *testing.T) { + err := c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") + if err != nil { + t.Fatal(err) + } + + info, _, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Error(err) + } + if info.ContentType != "application/octet-stream" { + t.Error("Bad content type", info.ContentType) + } + if info.Bytes != CONTENT_SIZE { + t.Error("Bad length") + } + if info.Hash != CONTENT_MD5 { + t.Error("Bad length") + } +} + +func TestObjectEmpty(t *testing.T) { + err := c.ObjectPutString(CONTAINER, EMPTYOBJECT, "", "") + if err != nil { + t.Fatal(err) + } + + info, _, err := c.Object(CONTAINER, EMPTYOBJECT) + if err != nil { + t.Error(err) + } + if info.ContentType != "application/octet-stream" { + t.Error("Bad content type", info.ContentType) + } + if info.Bytes != 0 { + t.Errorf("Bad length want 0 got %v", info.Bytes) + } + if info.Hash != EMPTY_MD5 { + t.Errorf("Bad MD5 want %v got %v", EMPTY_MD5, info.Hash) + } + + // Tidy up + err = c.ObjectDelete(CONTAINER, EMPTYOBJECT) + if err != nil { + t.Error(err) + } +} + +func TestObjectPutBytes(t *testing.T) { + err := c.ObjectPutBytes(CONTAINER, OBJECT, []byte(CONTENTS), "") + if err != nil { + t.Fatal(err) + } + + info, _, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Error(err) + } + if info.ContentType != "application/octet-stream" { + t.Error("Bad content type", info.ContentType) + } + if info.Bytes != CONTENT_SIZE { + t.Error("Bad length") + } + if info.Hash != CONTENT_MD5 { + t.Error("Bad length") + } +} + +func TestObjectPutMimeType(t *testing.T) { + err := c.ObjectPutString(CONTAINER, "test.jpg", CONTENTS, "") + if err != nil { + t.Fatal(err) + } + + info, _, err := c.Object(CONTAINER, "test.jpg") + if err != nil { + t.Error(err) + } + if info.ContentType != "image/jpeg" { + t.Error("Bad content type", info.ContentType) + } + + // Tidy up + err = c.ObjectDelete(CONTAINER, "test.jpg") + if err != nil { + 
t.Error(err) + } +} + +func TestObjectCreate(t *testing.T) { + out, err := c.ObjectCreate(CONTAINER, OBJECT2, true, "", "", nil) + if err != nil { + t.Fatal(err) + } + buf := &bytes.Buffer{} + hash := md5.New() + out2 := io.MultiWriter(out, buf, hash) + for i := 0; i < 100; i++ { + fmt.Fprintf(out2, "%d %s\n", i, CONTENTS) + } + err = out.Close() + if err != nil { + t.Error(err) + } + expected := buf.String() + contents, err := c.ObjectGetString(CONTAINER, OBJECT2) + if err != nil { + t.Error(err) + } + if contents != expected { + t.Error("Contents wrong") + } + + // Test writing on closed file + n, err := out.Write([]byte{0}) + if err == nil || n != 0 { + t.Error("Expecting error and n == 0 writing on closed file", err, n) + } + + // Now with hash instead + out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, fmt.Sprintf("%x", hash.Sum(nil)), "", nil) + if err != nil { + t.Fatal(err) + } + _, err = out.Write(buf.Bytes()) + if err != nil { + t.Error(err) + } + err = out.Close() + if err != nil { + t.Error(err) + } + contents, err = c.ObjectGetString(CONTAINER, OBJECT2) + if err != nil { + t.Error(err) + } + if contents != expected { + t.Error("Contents wrong") + } + + // Now with bad hash + out, err = c.ObjectCreate(CONTAINER, OBJECT2, false, CONTENT_MD5, "", nil) + if err != nil { + t.Fatal(err) + } + // FIXME: work around bug which produces 503 not 422 for empty corrupted files + fmt.Fprintf(out, "Sausage") + err = out.Close() + if err != swift.ObjectCorrupted { + t.Error("Expecting object corrupted not", err) + } + + // Tidy up + err = c.ObjectDelete(CONTAINER, OBJECT2) + if err != nil { + t.Error(err) + } +} + +func TestObjectGetString(t *testing.T) { + contents, err := c.ObjectGetString(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + if contents != CONTENTS { + t.Error("Contents wrong") + } + //fmt.Println(contents) +} + +func TestObjectGetBytes(t *testing.T) { + contents, err := c.ObjectGetBytes(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + if string(contents) != CONTENTS { + t.Error("Contents wrong") + } + //fmt.Println(contents) +} + +func TestObjectOpen(t *testing.T) { + file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + n, err := io.Copy(&buf, file) + if err != nil { + t.Fatal(err) + } + if n != CONTENT_SIZE { + t.Fatal("Wrong length", n, CONTENT_SIZE) + } + if buf.String() != CONTENTS { + t.Error("Contents wrong") + } + err = file.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestObjectOpenPartial(t *testing.T) { + file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + n, err := io.CopyN(&buf, file, 1) + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("Wrong length", n, CONTENT_SIZE) + } + if buf.String() != CONTENTS[:1] { + t.Error("Contents wrong") + } + err = file.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestObjectOpenLength(t *testing.T) { + file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) + if err != nil { + t.Fatal(err) + } + // FIXME ideally this would check both branches of the Length() code + n, err := file.Length() + if err != nil { + t.Fatal(err) + } + if n != CONTENT_SIZE { + t.Fatal("Wrong length", n, CONTENT_SIZE) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestObjectOpenSeek(t *testing.T) { + + plan := []struct { + whence int + offset int64 + result int64 + }{ + {-1, 0, 0}, + {-1, 0, 1}, + {-1, 0, 2}, + {0, 0, 0}, + {0, 0, 0}, + {0, 1, 
1}, + {0, 2, 2}, + {1, 0, 3}, + {1, -2, 2}, + {1, 1, 4}, + {2, -1, 4}, + {2, -3, 2}, + {2, -2, 3}, + {2, -5, 0}, + {2, -4, 1}, + } + + file, _, err := c.ObjectOpen(CONTAINER, OBJECT, true, nil) + if err != nil { + t.Fatal(err) + } + + for _, p := range plan { + if p.whence >= 0 { + result, err := file.Seek(p.offset, p.whence) + if err != nil { + t.Fatal(err, p) + } + if result != p.result { + t.Fatal("Seek result was", result, "expecting", p.result, p) + } + + } + var buf bytes.Buffer + n, err := io.CopyN(&buf, file, 1) + if err != nil { + t.Fatal(err, p) + } + if n != 1 { + t.Fatal("Wrong length", n, p) + } + actual := buf.String() + expected := CONTENTS[p.result : p.result+1] + if actual != expected { + t.Error("Contents wrong, expecting", expected, "got", actual, p) + } + } + + err = file.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestObjectUpdate(t *testing.T) { + err := c.ObjectUpdate(CONTAINER, OBJECT, m1.ObjectHeaders()) + if err != nil { + t.Fatal(err) + } +} + +func checkTime(t *testing.T, when time.Time, low, high int) { + dt := time.Now().Sub(when) + if dt < time.Duration(low)*time.Second || dt > time.Duration(high)*time.Second { + t.Errorf("Time is wrong: dt=%q, when=%q", dt, when) + } +} + +func TestObject(t *testing.T) { + object, headers, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "1", "potato-salad": "2"}) + if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { + t.Error("Bad object info", object) + } + checkTime(t, object.LastModified, -10, 10) +} + +func TestObjectUpdate2(t *testing.T) { + err := c.ObjectUpdate(CONTAINER, OBJECT, m2.ObjectHeaders()) + if err != nil { + t.Fatal(err) + } + _, headers, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + //fmt.Println(headers, headers.ObjectMetadata()) + compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) +} + +func TestContainers(t *testing.T) { + containers, err := c.Containers(nil) + if err != nil { + t.Fatal(err) + } + ok := false + for _, container := range containers { + if container.Name == CONTAINER { + ok = true + // Container may or may not have the file contents in it + // Swift updates may be behind + if container.Count == 0 && container.Bytes == 0 { + break + } + if container.Count == 1 && container.Bytes == CONTENT_SIZE { + break + } + t.Errorf("Bad size of Container %q: %q", CONTAINER, container) + break + } + } + if !ok { + t.Errorf("Didn't find container %q in listing %q", CONTAINER, containers) + } + //fmt.Println(containers) +} + +func TestObjectNames(t *testing.T) { + objects, err := c.ObjectNames(CONTAINER, nil) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0] != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) +} + +func TestObjectNamesAll(t *testing.T) { + objects, err := c.ObjectNamesAll(CONTAINER, nil) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0] != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) +} + +func TestObjectNamesAllWithLimit(t *testing.T) { + objects, err := c.ObjectNamesAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0] != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) 
+} + +func TestObjectsWalk(t *testing.T) { + objects := make([]string, 0) + err := c.ObjectsWalk(CONTAINER, nil, func(opts *swift.ObjectsOpts) (interface{}, error) { + newObjects, err := c.ObjectNames(CONTAINER, opts) + if err == nil { + objects = append(objects, newObjects...) + } + return newObjects, err + }) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0] != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) +} + +func TestObjects(t *testing.T) { + objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 { + t.Fatal("Should only be 1 object") + } + object := objects[0] + if object.Name != OBJECT || object.Bytes != CONTENT_SIZE || object.ContentType != "application/octet-stream" || object.Hash != CONTENT_MD5 || object.PseudoDirectory != false || object.SubDir != "" { + t.Error("Bad object info", object) + } + checkTime(t, object.LastModified, -10, 10) + // fmt.Println(objects) +} + +func TestObjectsDirectory(t *testing.T) { + err := c.ObjectPutString(CONTAINER, "directory", "", "application/directory") + if err != nil { + t.Fatal(err) + } + defer c.ObjectDelete(CONTAINER, "directory") + + // Look for the directory object and check we aren't confusing + // it with a pseudo directory object + objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 2 { + t.Fatal("Should only be 2 objects") + } + found := false + for i := range objects { + object := objects[i] + if object.Name == "directory" { + found = true + if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "d41d8cd98f00b204e9800998ecf8427e" || object.PseudoDirectory != false || object.SubDir != "" { + t.Error("Bad object info", object) + } + checkTime(t, object.LastModified, -10, 10) + } + } + if !found { + t.Error("Didn't find directory object") + } + // fmt.Println(objects) +} + +func TestObjectsPseudoDirectory(t *testing.T) { + err := c.ObjectPutString(CONTAINER, "directory/puppy.jpg", "cute puppy", "") + if err != nil { + t.Fatal(err) + } + defer c.ObjectDelete(CONTAINER, "directory/puppy.jpg") + + // Look for the pseudo directory + objects, err := c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/'}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 2 { + t.Fatal("Should only be 2 objects", objects) + } + found := false + for i := range objects { + object := objects[i] + if object.Name == "directory/" { + found = true + if object.Bytes != 0 || object.ContentType != "application/directory" || object.Hash != "" || object.PseudoDirectory != true || object.SubDir != "directory/" && object.LastModified.IsZero() { + t.Error("Bad object info", object) + } + } + } + if !found { + t.Error("Didn't find directory object", objects) + } + + // Look in the pseudo directory now + objects, err = c.Objects(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "directory/"}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 { + t.Fatal("Should only be 1 object", objects) + } + object := objects[0] + if object.Name != "directory/puppy.jpg" || object.Bytes != 10 || object.ContentType != "image/jpeg" || object.Hash != "87a12ea22fca7f54f0cefef1da535489" || object.PseudoDirectory != false || object.SubDir != "" { + t.Error("Bad object info", object) + } + checkTime(t, object.LastModified, -10, 10) + // fmt.Println(objects) +} + +func TestObjectsAll(t *testing.T) { + objects, err :=
c.ObjectsAll(CONTAINER, nil) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0].Name != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) +} + +func TestObjectsAllWithLimit(t *testing.T) { + objects, err := c.ObjectsAll(CONTAINER, &swift.ObjectsOpts{Limit: 1}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0].Name != OBJECT { + t.Error("Incorrect listing", objects) + } + //fmt.Println(objects) +} + +func TestObjectNamesWithPath(t *testing.T) { + objects, err := c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: ""}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 1 || objects[0] != OBJECT { + t.Error("Bad listing with path", objects) + } + // fmt.Println(objects) + objects, err = c.ObjectNames(CONTAINER, &swift.ObjectsOpts{Delimiter: '/', Path: "Downloads/"}) + if err != nil { + t.Fatal(err) + } + if len(objects) != 0 { + t.Error("Bad listing with path", objects) + } + // fmt.Println(objects) +} + +func TestObjectCopy(t *testing.T) { + _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, nil) + if err != nil { + t.Fatal(err) + } + err = c.ObjectDelete(CONTAINER, OBJECT2) + if err != nil { + t.Fatal(err) + } +} + +func TestObjectCopyWithMetadata(t *testing.T) { + m := swift.Metadata{} + m["copy-special-metadata"] = "hello" + m["hello"] = "3" + h := m.ObjectHeaders() + h["Content-Type"] = "image/jpeg" + _, err := c.ObjectCopy(CONTAINER, OBJECT, CONTAINER, OBJECT2, h) + if err != nil { + t.Fatal(err) + } + // Re-read the metadata to see if it is correct + _, headers, err := c.Object(CONTAINER, OBJECT2) + if err != nil { + t.Fatal(err) + } + if headers["Content-Type"] != "image/jpeg" { + t.Error("Didn't change content type") + } + compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "3", "potato-salad": "", "copy-special-metadata": "hello"}) + err = c.ObjectDelete(CONTAINER, OBJECT2) + if err != nil { + t.Fatal(err) + } +} + +func TestObjectMove(t *testing.T) { + err := c.ObjectMove(CONTAINER, OBJECT, CONTAINER, OBJECT2) + if err != nil { + t.Fatal(err) + } + testExistenceAfterDelete(t, CONTAINER, OBJECT) + _, _, err = c.Object(CONTAINER, OBJECT2) + if err != nil { + t.Fatal(err) + } + + err = c.ObjectMove(CONTAINER, OBJECT2, CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + testExistenceAfterDelete(t, CONTAINER, OBJECT2) + _, headers, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) +} + +func TestObjectUpdateContentType(t *testing.T) { + err := c.ObjectUpdateContentType(CONTAINER, OBJECT, "text/potato") + if err != nil { + t.Fatal(err) + } + // Re-read the metadata to see if it is correct + _, headers, err := c.Object(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + if headers["Content-Type"] != "text/potato" { + t.Error("Didn't change content type") + } + compareMaps(t, headers.ObjectMetadata(), map[string]string{"hello": "", "potato-salad": ""}) +} + +func TestVersionContainerCreate(t *testing.T) { + if err := c.VersionContainerCreate(CURRENT_CONTAINER, VERSIONS_CONTAINER); err != nil { + if err == swift.Forbidden { + t.Log("Server doesn't support Versions - skipping test") + skipVersionTests = true + return + } + t.Fatal(err) + } +} + +func TestVersionObjectAdd(t *testing.T) { + if skipVersionTests { + t.Log("Server doesn't support Versions - skipping test") + return + } + // Version 1 + if err := c.ObjectPutString(CURRENT_CONTAINER, 
OBJECT, CONTENTS, ""); err != nil { + t.Fatal(err) + } + if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } else if contents != CONTENTS { + t.Error("Contents wrong") + } + + // Version 2 + if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { + t.Fatal(err) + } + if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } else if contents != CONTENTS2 { + t.Error("Contents wrong") + } + + // Version 3 + if err := c.ObjectPutString(CURRENT_CONTAINER, OBJECT, CONTENTS2, ""); err != nil { + t.Fatal(err) + } +} + +func TestVersionObjectList(t *testing.T) { + if skipVersionTests { + t.Log("Server doesn't support Versions - skipping test") + return + } + list, err := c.VersionObjectList(VERSIONS_CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + + if len(list) != 2 { + t.Error("Version list should return 2 objects") + } + + //fmt.Print(list) +} + +func TestVersionObjectDelete(t *testing.T) { + if skipVersionTests { + t.Log("Server doesn't support Versions - skipping test") + return + } + // Delete Version 3 + if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } + + // Delete Version 2 + if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } + + // Contents should be reverted to Version 1 + if contents, err := c.ObjectGetString(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } else if contents != CONTENTS { + t.Error("Contents wrong") + } +} + +// cleanUpContainer deletes everything in the container and then the +// container. It expects the container to be empty and if it wasn't +// it logs an error. +func cleanUpContainer(t *testing.T, container string) { + objects, err := c.Objects(container, nil) + if err != nil { + t.Error(err, container) + } else { + if len(objects) != 0 { + t.Error("Container not empty", container) + } + for _, object := range objects { + t.Log("Deleting spurious", object.Name) + err = c.ObjectDelete(container, object.Name) + if err != nil { + t.Error(err, container) + } + } + } + + if err := c.ContainerDelete(container); err != nil { + t.Error(err, container) + } +} + +func TestVersionDeleteContent(t *testing.T) { + if skipVersionTests { + t.Log("Server doesn't support Versions - skipping test") + } else { + // Delete Version 1 + if err := c.ObjectDelete(CURRENT_CONTAINER, OBJECT); err != nil { + t.Fatal(err) + } + } + cleanUpContainer(t, VERSIONS_CONTAINER) + cleanUpContainer(t, CURRENT_CONTAINER) +} + +// Check for non existence after delete +// May have to do it a few times to wait for swift to be consistent. 
+func testExistenceAfterDelete(t *testing.T, container, object string) { + for i := 10; i >= 0; i-- { + _, _, err := c.Object(container, object) + if err == swift.ObjectNotFound { + break + } + if i == 0 { + t.Fatalf("Expecting object %q/%q not found, got: err=%v", container, object, err) + } + time.Sleep(1 * time.Second) + } +} + +func TestObjectDelete(t *testing.T) { + err := c.ObjectDelete(CONTAINER, OBJECT) + if err != nil { + t.Fatal(err) + } + testExistenceAfterDelete(t, CONTAINER, OBJECT) + err = c.ObjectDelete(CONTAINER, OBJECT) + if err != swift.ObjectNotFound { + t.Fatal("Expecting Object not found", err) + } +} + +func TestBulkDelete(t *testing.T) { + result, err := c.BulkDelete(CONTAINER, []string{OBJECT}) + if err == swift.Forbidden { + t.Log("Server doesn't support BulkDelete - skipping test") + return + } + if err != nil { + t.Fatal(err) + } + if result.NumberNotFound != 1 { + t.Error("Expected 1, actual:", result.NumberNotFound) + } + if result.NumberDeleted != 0 { + t.Error("Expected 0, actual:", result.NumberDeleted) + } + err = c.ObjectPutString(CONTAINER, OBJECT, CONTENTS, "") + if err != nil { + t.Fatal(err) + } + result, err = c.BulkDelete(CONTAINER, []string{OBJECT2, OBJECT}) + if err != nil { + t.Fatal(err) + } + if result.NumberNotFound != 1 { + t.Error("Expected 1, actual:", result.NumberNotFound) + } + if result.NumberDeleted != 1 { + t.Error("Expected 1, actual:", result.NumberDeleted) + } + t.Log("Errors:", result.Errors) +} + +func TestBulkUpload(t *testing.T) { + buffer := new(bytes.Buffer) + ds := tar.NewWriter(buffer) + var files = []struct{ Name, Body string }{ + {OBJECT, CONTENTS}, + {OBJECT2, CONTENTS2}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Size: int64(len(file.Body)), + } + if err := ds.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err := ds.Write([]byte(file.Body)); err != nil { + t.Fatal(err) + } + } + if err := ds.Close(); err != nil { + t.Fatal(err) + } + + result, err := c.BulkUpload(CONTAINER, buffer, swift.UploadTar, nil) + if err == swift.Forbidden { + t.Log("Server doesn't support BulkUpload - skipping test") + return + } + if err != nil { + t.Fatal(err) + } + if result.NumberCreated != 2 { + t.Error("Expected 2, actual:", result.NumberCreated) + } + t.Log("Errors:", result.Errors) + + _, _, err = c.Object(CONTAINER, OBJECT) + if err != nil { + t.Error("Expecting object to be found") + } + _, _, err = c.Object(CONTAINER, OBJECT2) + if err != nil { + t.Error("Expecting object to be found") + } + c.ObjectDelete(CONTAINER, OBJECT) + c.ObjectDelete(CONTAINER, OBJECT2) +} + +func TestObjectDifficultName(t *testing.T) { + const name = `hello?
sausage/êé/Hello, 世界/ " ' @ < > & ?/` + err := c.ObjectPutString(CONTAINER, name, CONTENTS, "") + if err != nil { + t.Fatal(err) + } + objects, err := c.ObjectNamesAll(CONTAINER, nil) + if err != nil { + t.Error(err) + } + found := false + for _, object := range objects { + if object == name { + found = true + break + } + } + if !found { + t.Errorf("Couldn't find %q in listing %q", name, objects) + } + err = c.ObjectDelete(CONTAINER, name) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerDelete(t *testing.T) { + err := c.ContainerDelete(CONTAINER) + if err != nil { + t.Fatal(err) + } + err = c.ContainerDelete(CONTAINER) + if err != swift.ContainerNotFound { + t.Fatal("Expecting container not found", err) + } + _, _, err = c.Container(CONTAINER) + if err != swift.ContainerNotFound { + t.Fatal("Expecting container not found", err) + } +} + +func TestUnAuthenticate(t *testing.T) { + c.UnAuthenticate() + if c.Authenticated() { + t.Fatal("Shouldn't be authenticated") + } + // Test re-authenticate + err := c.Authenticate() + if err != nil { + t.Fatal("ReAuth failed", err) + } + if !c.Authenticated() { + t.Fatal("Not authenticated") + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go b/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go new file mode 100644 index 000000000..78c07da41 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/swifttest/server.go @@ -0,0 +1,885 @@ +// This implements a very basic Swift server +// Everything is stored in memory +// +// This comes from the https://github.com/mitchellh/goamz +// and was adapted for Swift +// +package swifttest + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net" + "net/http" + "net/url" + "path" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/ncw/swift" +) + +const ( + DEBUG = false +) + +type SwiftServer struct { + t *testing.T + reqId int + mu sync.Mutex + Listener net.Listener + AuthURL string + URL string + Containers map[string]*container + Accounts map[string]*account + Sessions map[string]*session +} + +// The Folder type represents a container stored in an account +type Folder struct { + Count int `json:"count"` + Bytes int `json:"bytes"` + Name string `json:"name"` +} + +// The Key type represents an item stored in an container. +type Key struct { + Key string `json:"name"` + LastModified string `json:"last_modified"` + Size int64 `json:"bytes"` + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string `json:"hash"` + ContentType string `json:"content_type"` + // Owner Owner +} + +type Subdir struct { + Subdir string `json:"subdir"` +} + +type swiftError struct { + statusCode int + Code string + Message string +} + +type action struct { + srv *SwiftServer + w http.ResponseWriter + req *http.Request + reqId string + user *account +} + +type session struct { + username string +} + +type metadata struct { + meta http.Header // metadata to return with requests. +} + +type account struct { + swift.Account + metadata + password string +} + +type object struct { + metadata + name string + mtime time.Time + checksum []byte // also held as ETag in meta. + data []byte + content_type string +} + +type container struct { + metadata + name string + ctime time.Time + objects map[string]*object + bytes int +} + +// A resource encapsulates the subject of an HTTP request. 
+// The resource referred to may or may not exist +// when the request is made. +type resource interface { + put(a *action) interface{} + get(a *action) interface{} + post(a *action) interface{} + delete(a *action) interface{} + copy(a *action) interface{} +} + +type objectResource struct { + name string + version string + container *container // always non-nil. + object *object // may be nil. +} + +type containerResource struct { + name string + container *container // non-nil if the container already exists. +} + +var responseParams = map[string]bool{ + "content-type": true, + "content-language": true, + "expires": true, + "cache-control": true, + "content-disposition": true, + "content-encoding": true, +} + +func fatalf(code int, codeStr string, errf string, a ...interface{}) { + panic(&swiftError{ + statusCode: code, + Code: codeStr, + Message: fmt.Sprintf(errf, a...), + }) +} + +func (m metadata) setMetadata(a *action, resource string) { + for key, values := range a.req.Header { + key = http.CanonicalHeaderKey(key) + if metaHeaders[key] || strings.HasPrefix(key, "X-"+strings.Title(resource)+"-Meta-") { + if values[0] != "" || resource == "object" { + m.meta[key] = values + } else { + m.meta.Del(key) + } + } + } +} + +func (m metadata) getMetadata(a *action) { + h := a.w.Header() + for name, d := range m.meta { + h[name] = d + } +} + +func (c container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { + var tmp orderedObjects + + // first get all matching objects and arrange them in alphabetical order. + for _, obj := range c.objects { + if strings.HasPrefix(obj.name, prefix) { + tmp = append(tmp, obj) + } + } + sort.Sort(tmp) + + var prefixes []string + for _, obj := range tmp { + if !strings.HasPrefix(obj.name, prefix) { + continue + } + + isPrefix := false + name := obj.name + if parent != "" { + if path.Dir(obj.name) != path.Clean(parent) { + continue + } + } else if delimiter != "" { + if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { + name = obj.name[:len(prefix)+i+len(delimiter)] + if prefixes != nil && prefixes[len(prefixes)-1] == name { + continue + } + isPrefix = true + } + } + + if name <= marker { + continue + } + + if isPrefix { + prefixes = append(prefixes, name) + + resp = append(resp, Subdir{ + Subdir: name, + }) + } else { + resp = append(resp, obj) + } + } + + return +} + +// GET on a container lists the objects in the container. 
+func (r containerResource) get(a *action) interface{} { + if r.container == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + + delimiter := a.req.Form.Get("delimiter") + marker := a.req.Form.Get("marker") + prefix := a.req.Form.Get("prefix") + format := a.req.URL.Query().Get("format") + parent := a.req.Form.Get("path") + + a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(r.container.bytes)) + a.w.Header().Set("X-Container-Object-Count", strconv.Itoa(len(r.container.objects))) + r.container.getMetadata(a) + + if a.req.Method == "HEAD" { + return nil + } + + objects := r.container.list(delimiter, marker, prefix, parent) + + if format == "json" { + a.w.Header().Set("Content-Type", "application/json") + var resp []interface{} + for _, item := range objects { + if obj, ok := item.(*object); ok { + resp = append(resp, obj.Key()) + } else { + resp = append(resp, item) + } + } + return resp + } else { + for _, item := range objects { + if obj, ok := item.(*object); ok { + a.w.Write([]byte(obj.name + "\n")) + } else if subdir, ok := item.(Subdir); ok { + a.w.Write([]byte(subdir.Subdir + "\n")) + } + } + return nil + } +} + +// orderedContainers holds a slice of containers that can be sorted +// by name. +type orderedContainers []*container + +func (s orderedContainers) Len() int { + return len(s) +} +func (s orderedContainers) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedContainers) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (r containerResource) delete(a *action) interface{} { + b := r.container + if b == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + if len(b.objects) > 0 { + fatalf(409, "Conflict", "The container you tried to delete is not empty") + } + delete(a.srv.Containers, b.name) + a.user.Containers-- + return nil +} + +func (r containerResource) put(a *action) interface{} { + if a.req.URL.Query().Get("extract-archive") != "" { + fatalf(403, "Operation forbidden", "Bulk upload is not supported") + } + + if r.container == nil { + if !validContainerName(r.name) { + fatalf(400, "InvalidContainerName", "The specified container is not valid") + } + r.container = &container{ + name: r.name, + objects: make(map[string]*object), + metadata: metadata{ + meta: make(http.Header), + }, + } + r.container.setMetadata(a, "container") + a.srv.Containers[r.name] = r.container + a.user.Containers++ + } + + return nil +} + +func (r containerResource) post(a *action) interface{} { + if r.container == nil { + fatalf(400, "Method", "The resource could not be found.") + } else { + r.container.setMetadata(a, "container") + a.w.WriteHeader(201) + jsonMarshal(a.w, Folder{ + Count: len(r.container.objects), + Bytes: r.container.bytes, + Name: r.container.name, + }) + } + return nil +} + +func (containerResource) copy(a *action) interface{} { return notAllowed() } + +// validContainerName returns whether name is a valid bucket name. +// Here are the rules, from: +// http://docs.openstack.org/api/openstack-object-storage/1.0/content/ch_object-storage-dev-api-storage.html +// +// Container names cannot exceed 256 bytes and cannot contain the / character. +// +func validContainerName(name string) bool { + if len(name) == 0 || len(name) > 256 { + return false + } + for _, r := range name { + switch { + case r == '/': + return false + default: + } + } + return true +} + +// orderedObjects holds a slice of objects that can be sorted +// by name. 
+type orderedObjects []*object + +func (s orderedObjects) Len() int { + return len(s) +} +func (s orderedObjects) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s orderedObjects) Less(i, j int) bool { + return s[i].name < s[j].name +} + +func (obj *object) Key() Key { + return Key{ + Key: obj.name, + LastModified: obj.mtime.Format("2006-01-02T15:04:05"), + Size: int64(len(obj.data)), + ETag: fmt.Sprintf("%x", obj.checksum), + ContentType: obj.content_type, + } +} + +var metaHeaders = map[string]bool{ + "Content-Type": true, + "Content-Encoding": true, + "Content-Disposition": true, + "X-Object-Manifest": true, +} + +var rangeRegexp = regexp.MustCompile("(bytes=)?([0-9]*)-([0-9]*)") + +// GET on an object gets the contents of the object. +func (objr objectResource) get(a *action) interface{} { + var ( + etag []byte + reader io.Reader + start int + end int = -1 + ) + obj := objr.object + if obj == nil { + fatalf(404, "Not Found", "The resource could not be found.") + } + + h := a.w.Header() + // add metadata + obj.getMetadata(a) + + if r := a.req.Header.Get("Range"); r != "" { + m := rangeRegexp.FindStringSubmatch(r) + if m[2] != "" { + start, _ = strconv.Atoi(m[2]) + } + if m[3] != "" { + end, _ = strconv.Atoi(m[3]) + } + } + + max := func(a int, b int) int { + if a > b { + return a + } + return b + } + + if manifest, ok := obj.meta["X-Object-Manifest"]; ok { + var segments []io.Reader + components := strings.SplitN(manifest[0], "/", 2) + segContainer := a.srv.Containers[components[0]] + prefix := components[1] + resp := segContainer.list("", "", prefix, "") + sum := md5.New() + cursor := 0 + size := 0 + for _, item := range resp { + if obj, ok := item.(*object); ok { + length := len(obj.data) + size += length + sum.Write([]byte(components[0] + "/" + obj.name + "\n")) + if start >= cursor+length { + continue + } + segments = append(segments, bytes.NewReader(obj.data[max(0, start-cursor):])) + cursor += length + } + } + etag = sum.Sum(nil) + if end == -1 { + end = size + } + reader = io.LimitReader(io.MultiReader(segments...), int64(end-start)) + } else { + if end == -1 { + end = len(obj.data) + } + etag = obj.checksum + reader = bytes.NewReader(obj.data[start:end]) + } + + h.Set("Content-Length", fmt.Sprint(end-start)) + h.Set("ETag", hex.EncodeToString(etag)) + h.Set("Last-Modified", obj.mtime.Format(http.TimeFormat)) + + if a.req.Method == "HEAD" { + return nil + } + + // TODO avoid holding the lock when writing data. + _, err := io.Copy(a.w, reader) + if err != nil { + // we can't do much except just log the fact. + log.Printf("error writing data: %v", err) + } + return nil +} + +// PUT on an object creates the object. +func (objr objectResource) put(a *action) interface{} { + var expectHash []byte + if c := a.req.Header.Get("ETag"); c != "" { + var err error + expectHash, err = hex.DecodeString(c) + if err != nil || len(expectHash) != md5.Size { + fatalf(400, "InvalidDigest", "The ETag you specified was invalid") + } + } + sum := md5.New() + // TODO avoid holding lock while reading data. 
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) + if err != nil { + fatalf(400, "TODO", "read error") + } + gotHash := sum.Sum(nil) + if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { + fatalf(422, "Bad ETag", "The ETag you specified did not match what we received") + } + if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { + fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") + } + + // TODO is this correct, or should we erase all previous metadata? + obj := objr.object + if obj == nil { + obj = &object{ + name: objr.name, + metadata: metadata{ + meta: make(http.Header), + }, + } + a.user.Objects++ + } else { + objr.container.bytes -= len(obj.data) + a.user.BytesUsed -= int64(len(obj.data)) + } + + var content_type string + if content_type = a.req.Header.Get("Content-Type"); content_type == "" { + content_type = mime.TypeByExtension(obj.name) + if content_type == "" { + content_type = "application/octet-stream" + } + } + + // PUT request has been successful - save data and metadata + obj.setMetadata(a, "object") + obj.content_type = content_type + obj.data = data + obj.checksum = gotHash + obj.mtime = time.Now().UTC() + objr.container.objects[objr.name] = obj + objr.container.bytes += len(data) + a.user.BytesUsed += int64(len(data)) + + h := a.w.Header() + h.Set("ETag", hex.EncodeToString(obj.checksum)) + + return nil +} + +func (objr objectResource) delete(a *action) interface{} { + if objr.object == nil { + fatalf(404, "NoSuchKey", "The specified key does not exist.") + } + + objr.container.bytes -= len(objr.object.data) + a.user.BytesUsed -= int64(len(objr.object.data)) + delete(objr.container.objects, objr.name) + a.user.Objects-- + return nil +} + +func (objr objectResource) post(a *action) interface{} { + obj := objr.object + obj.setMetadata(a, "object") + return nil +} + +func (objr objectResource) copy(a *action) interface{} { + if objr.object == nil { + fatalf(404, "NoSuchKey", "The specified key does not exist.") + } + + obj := objr.object + destination := a.req.Header.Get("Destination") + if destination == "" { + fatalf(400, "Bad Request", "You must provide a Destination header") + } + + var ( + obj2 *object + objr2 objectResource + ) + + destURL, _ := url.Parse("/v1/AUTH_tk/" + destination) + r := a.srv.resourceForURL(destURL) + switch t := r.(type) { + case objectResource: + objr2 = t + if objr2.object == nil { + obj2 = &object{ + name: objr2.name, + metadata: metadata{ + meta: make(http.Header), + }, + } + a.user.Objects++ + } else { + obj2 = objr2.object + objr2.container.bytes -= len(obj2.data) + a.user.BytesUsed -= int64(len(obj2.data)) + } + default: + fatalf(400, "Bad Request", "Destination must point to a valid object path") + } + + obj2.content_type = obj.content_type + obj2.data = obj.data + obj2.checksum = obj.checksum + obj2.mtime = time.Now() + objr2.container.objects[objr2.name] = obj2 + objr2.container.bytes += len(obj.data) + a.user.BytesUsed += int64(len(obj.data)) + + for key, values := range obj.metadata.meta { + obj2.metadata.meta[key] = values + } + obj2.setMetadata(a, "object") + + return nil +} + +func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { + // ignore error from ParseForm as it's usually spurious. 
+ req.ParseForm() + + s.mu.Lock() + defer s.mu.Unlock() + + if DEBUG { + log.Printf("swifttest %q %q", req.Method, req.URL) + } + a := &action{ + srv: s, + w: w, + req: req, + reqId: fmt.Sprintf("%09X", s.reqId), + } + s.reqId++ + + var r resource + defer func() { + switch err := recover().(type) { + case *swiftError: + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + http.Error(w, err.Message, err.statusCode) + case nil: + default: + panic(err) + } + }() + + var resp interface{} + + if req.URL.String() == "/v1.0" { + username := req.Header.Get("x-auth-user") + key := req.Header.Get("x-auth-key") + if acct, ok := s.Accounts[username]; ok { + if acct.password == key { + r := make([]byte, 16) + _, _ = rand.Read(r) + id := fmt.Sprintf("%X", r) + w.Header().Set("X-Storage-Url", s.URL+"/AUTH_"+username) + w.Header().Set("X-Auth-Token", "AUTH_tk"+string(id)) + w.Header().Set("X-Storage-Token", "AUTH_tk"+string(id)) + s.Sessions[id] = &session{ + username: username, + } + return + } + } + panic(notAuthorized()) + } + + key := req.Header.Get("x-auth-token") + session, ok := s.Sessions[key[7:]] + if !ok { + panic(notAuthorized()) + } + + a.user = s.Accounts[session.username] + + r = s.resourceForURL(req.URL) + + switch req.Method { + case "PUT": + resp = r.put(a) + case "GET", "HEAD": + resp = r.get(a) + case "DELETE": + resp = r.delete(a) + case "POST": + resp = r.post(a) + case "COPY": + resp = r.copy(a) + default: + fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) + } + + content_type := req.Header.Get("Content-Type") + if resp != nil && req.Method != "HEAD" { + if strings.HasPrefix(content_type, "application/json") || + req.URL.Query().Get("format") == "json" { + jsonMarshal(w, resp) + } else { + switch r := resp.(type) { + case string: + w.Write([]byte(r)) + default: + w.Write(resp.([]byte)) + } + } + } +} + +func jsonMarshal(w io.Writer, x interface{}) { + if err := json.NewEncoder(w).Encode(x); err != nil { + panic(fmt.Errorf("error marshalling %#v: %v", x, err)) + } +} + +var pathRegexp = regexp.MustCompile("/v1/AUTH_[a-zA-Z0-9]+(/([^/]+)(/(.*))?)?") + +// resourceForURL returns a resource object for the given URL. +func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { + m := pathRegexp.FindStringSubmatch(u.Path) + if m == nil { + fatalf(404, "InvalidURI", "Couldn't parse the specified URI") + } + containerName := m[2] + objectName := m[4] + if containerName == "" { + return rootResource{} + } + b := containerResource{ + name: containerName, + container: srv.Containers[containerName], + } + + if objectName == "" { + return b + } + + if b.container == nil { + fatalf(404, "NoSuchContainer", "The specified container does not exist") + } + + objr := objectResource{ + name: objectName, + version: u.Query().Get("versionId"), + container: b.container, + } + + if obj := objr.container.objects[objr.name]; obj != nil { + objr.object = obj + } + return objr +} + +// nullResource has error stubs for all resource methods. 
+type nullResource struct{} + +func notAllowed() interface{} { + fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +func notAuthorized() interface{} { + fatalf(401, "Unauthorized", "This server could not verify that you are authorized to access the document you requested.") + return nil +} + +func (nullResource) put(a *action) interface{} { return notAllowed() } +func (nullResource) get(a *action) interface{} { return notAllowed() } +func (nullResource) post(a *action) interface{} { return notAllowed() } +func (nullResource) delete(a *action) interface{} { return notAllowed() } +func (nullResource) copy(a *action) interface{} { return notAllowed() } + +type rootResource struct{} + +func (rootResource) put(a *action) interface{} { return notAllowed() } +func (rootResource) get(a *action) interface{} { + marker := a.req.Form.Get("marker") + prefix := a.req.Form.Get("prefix") + format := a.req.URL.Query().Get("format") + + h := a.w.Header() + + h.Set("X-Account-Bytes-Used", strconv.Itoa(int(a.user.BytesUsed))) + h.Set("X-Account-Container-Count", strconv.Itoa(int(a.user.Containers))) + h.Set("X-Account-Object-Count", strconv.Itoa(int(a.user.Objects))) + + // add metadata + a.user.metadata.getMetadata(a) + + if a.req.Method == "HEAD" { + return nil + } + + var tmp orderedContainers + // first get all matching objects and arrange them in alphabetical order. + for _, container := range a.srv.Containers { + if strings.HasPrefix(container.name, prefix) { + tmp = append(tmp, container) + } + } + sort.Sort(tmp) + + resp := make([]Folder, 0) + for _, container := range tmp { + if container.name <= marker { + continue + } + if format == "json" { + resp = append(resp, Folder{ + Count: len(container.objects), + Bytes: container.bytes, + Name: container.name, + }) + } else { + a.w.Write([]byte(container.name + "\n")) + } + } + + if format == "json" { + return resp + } else { + return nil + } +} + +func (r rootResource) post(a *action) interface{} { + a.user.metadata.setMetadata(a, "account") + return nil +} + +func (rootResource) delete(a *action) interface{} { + if a.req.URL.Query().Get("bulk-delete") == "1" { + fatalf(403, "Operation forbidden", "Bulk delete is not supported") + } + + return notAllowed() +} + +func (rootResource) copy(a *action) interface{} { return notAllowed() } + +func NewSwiftServer(address string) (*SwiftServer, error) { + var ( + l net.Listener + err error + ) + if strings.Index(address, ":") == -1 { + for port := 1024; port < 65535; port++ { + addr := fmt.Sprintf("%s:%d", address, port) + if l, err = net.Listen("tcp", addr); err == nil { + address = addr + break + } + } + } else { + l, err = net.Listen("tcp", address) + } + if err != nil { + return nil, fmt.Errorf("cannot listen on %s: %v", address, err) + } + + server := &SwiftServer{ + Listener: l, + AuthURL: "http://" + l.Addr().String() + "/v1.0", + URL: "http://" + l.Addr().String() + "/v1", + Containers: make(map[string]*container), + Accounts: make(map[string]*account), + Sessions: make(map[string]*session), + } + + server.Accounts["swifttest"] = &account{ + password: "swifttest", + metadata: metadata{ + meta: make(http.Header), + }, + } + + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + server.serveHTTP(w, req) + })) + + return server, nil +} + +func (srv *SwiftServer) Close() { + srv.Listener.Close() +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go 
b/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go new file mode 100644 index 000000000..3839e9ea0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader.go @@ -0,0 +1,57 @@ +package swift + +import ( + "io" + "time" +) + +// An io.ReadCloser which obeys an idle timeout +type timeoutReader struct { + reader io.ReadCloser + timeout time.Duration + cancel func() +} + +// Returns a wrapper around the reader which obeys an idle +// timeout. The cancel function is called if the timeout happens +func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader { + return &timeoutReader{ + reader: reader, + timeout: timeout, + cancel: cancel, + } +} + +// Read reads up to len(p) bytes into p +// +// Waits at most for timeout for the read to complete otherwise returns a timeout +func (t *timeoutReader) Read(p []byte) (int, error) { + // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? + // Do the read in the background + type result struct { + n int + err error + } + done := make(chan result, 1) + go func() { + n, err := t.reader.Read(p) + done <- result{n, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.n, r.err + case <-time.After(t.timeout): + t.cancel() + return 0, TimeoutError + } + panic("unreachable") // for Go 1.0 +} + +// Close the channel +func (t *timeoutReader) Close() error { + return t.reader.Close() +} + +// Check it satisfies the interface +var _ io.ReadCloser = &timeoutReader{} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go b/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go new file mode 100644 index 000000000..2348617b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/timeout_reader_test.go @@ -0,0 +1,107 @@ +// This tests TimeoutReader + +package swift + +import ( + "io" + "io/ioutil" + "sync" + "testing" + "time" +) + +// An io.ReadCloser for testing +type testReader struct { + sync.Mutex + n int + delay time.Duration + closed bool +} + +// Returns n bytes with at time.Duration delay +func newTestReader(n int, delay time.Duration) *testReader { + return &testReader{ + n: n, + delay: delay, + } +} + +// Returns 1 byte at a time after delay +func (t *testReader) Read(p []byte) (n int, err error) { + if t.n <= 0 { + return 0, io.EOF + } + time.Sleep(t.delay) + p[0] = 'A' + t.Lock() + t.n-- + t.Unlock() + return 1, nil +} + +// Close the channel +func (t *testReader) Close() error { + t.Lock() + t.closed = true + t.Unlock() + return nil +} + +func TestTimeoutReaderNoTimeout(t *testing.T) { + test := newTestReader(3, 10*time.Millisecond) + cancelled := false + cancel := func() { + cancelled = true + } + tr := newTimeoutReader(test, 100*time.Millisecond, cancel) + b, err := ioutil.ReadAll(tr) + if err != nil || string(b) != "AAA" { + t.Fatalf("Bad read %s %s", err, b) + } + if cancelled { + t.Fatal("Cancelled when shouldn't have been") + } + if test.n != 0 { + t.Fatal("Didn't read all") + } + if test.closed { + t.Fatal("Shouldn't be closed") + } + tr.Close() + if !test.closed { + t.Fatal("Should be closed") + } +} + +func TestTimeoutReaderTimeout(t *testing.T) { + // Return those bytes slowly so we get an idle timeout + test := newTestReader(3, 100*time.Millisecond) + cancelled := false + cancel := func() { + cancelled = true + } + tr := newTimeoutReader(test, 10*time.Millisecond, cancel) + _, err := ioutil.ReadAll(tr) + if err != TimeoutError { + t.Fatal("Expecting TimeoutError, 
got", err) + } + if !cancelled { + t.Fatal("Not cancelled when should have been") + } + test.Lock() + n := test.n + test.Unlock() + if n == 0 { + t.Fatal("Read all") + } + if n != 3 { + t.Fatal("Didn't read any") + } + if test.closed { + t.Fatal("Shouldn't be closed") + } + tr.Close() + if !test.closed { + t.Fatal("Should be closed") + } +} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go b/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go new file mode 100644 index 000000000..b12b1bbe2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader.go @@ -0,0 +1,34 @@ +package swift + +import ( + "io" + "time" +) + +// An io.Reader which resets a watchdog timer whenever data is read +type watchdogReader struct { + timeout time.Duration + reader io.Reader + timer *time.Timer +} + +// Returns a new reader which will kick the watchdog timer whenever data is read +func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { + return &watchdogReader{ + timeout: timeout, + reader: reader, + timer: timer, + } +} + +// Read reads up to len(p) bytes into p +func (t *watchdogReader) Read(p []byte) (n int, err error) { + // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? + resetTimer(t.timer, t.timeout) + n, err = t.reader.Read(p) + resetTimer(t.timer, t.timeout) + return +} + +// Check it satisfies the interface +var _ io.Reader = &watchdogReader{} diff --git a/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go b/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go new file mode 100644 index 000000000..8b879d444 --- /dev/null +++ b/Godeps/_workspace/src/github.com/ncw/swift/watchdog_reader_test.go @@ -0,0 +1,61 @@ +// This tests WatchdogReader + +package swift + +import ( + "io/ioutil" + "testing" + "time" +) + +// Uses testReader from timeout_reader_test.go + +func testWatchdogReaderTimeout(t *testing.T, initialTimeout, watchdogTimeout time.Duration, expectedTimeout bool) { + test := newTestReader(3, 10*time.Millisecond) + timer := time.NewTimer(initialTimeout) + firedChan := make(chan bool) + started := make(chan bool) + go func() { + started <- true + select { + case <-timer.C: + firedChan <- true + } + }() + <-started + wr := newWatchdogReader(test, watchdogTimeout, timer) + b, err := ioutil.ReadAll(wr) + if err != nil || string(b) != "AAA" { + t.Fatalf("Bad read %s %s", err, b) + } + fired := false + select { + case fired = <-firedChan: + default: + } + if expectedTimeout { + if !fired { + t.Fatal("Timer should have fired") + } + } else { + if fired { + t.Fatal("Timer should not have fired") + } + } +} + +func TestWatchdogReaderNoTimeout(t *testing.T) { + testWatchdogReaderTimeout(t, 100*time.Millisecond, 100*time.Millisecond, false) +} + +func TestWatchdogReaderTimeout(t *testing.T) { + testWatchdogReaderTimeout(t, 5*time.Millisecond, 5*time.Millisecond, true) +} + +func TestWatchdogReaderNoTimeoutShortInitial(t *testing.T) { + testWatchdogReaderTimeout(t, 5*time.Millisecond, 100*time.Millisecond, false) +} + +func TestWatchdogReaderTimeoutLongInitial(t *testing.T) { + testWatchdogReaderTimeout(t, 100*time.Millisecond, 5*time.Millisecond, true) +} diff --git a/cmd/registry/main.go b/cmd/registry/main.go index f74b06abf..66ce6367f 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -28,6 +28,7 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ 
"github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/swift" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" "github.com/yvasiyarov/gorelic" diff --git a/docs/configuration.md b/docs/configuration.md index 23b2a7780..1f3f15d37 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -92,6 +92,18 @@ information about each option that appears later in this page. poolname: radospool username: radosuser chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix cache: blobdescriptor: redis maintenance: @@ -296,6 +308,18 @@ Permitted values are `error`, `warn`, `info` and `debug`. The default is poolname: radospool username: radosuser chunksize: 4194304 + swift: + username: username + password: password + authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth + tenant: tenantname + tenantid: tenantid + domain: domain name for Openstack Identity v3 API + domainid: domain id for Openstack Identity v3 API + insecureskipverify: true + region: fr + container: containername + rootdirectory: /swift/object/name/prefix cache: blobdescriptor: inmemory maintenance: @@ -580,6 +604,151 @@ must be set. Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). +### Openstack Swift + +This storage backend uses Openstack Swift object storage. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterRequiredDescription
+ authurl + + yes + + URL for obtaining an auth token. https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth +
+ username + + yes + + Your Openstack user name. +
+ password + + yes + + Your Openstack password. +
+ region + + no + + The Openstack region in which your container exists. +
+ container + + yes + + The name of the container in which you want to store the registry's data. +
+ tenant + + no + + Your Openstack tenant name. +
+ tenantid + + no + + Your Openstack tenant id. +
+ domain + + no + + Your Openstack domain name for Identity v3 API. +
+ domainid + + no + + Your Openstack domain id for Identity v3 API. +
+ insecureskipverify + + no + + Set to true to skip TLS verification; defaults to false. +
+ chunksize + + no + + Size of the data segments for the Swift Dynamic Large Objects. This value should be a number of bytes, at least 1M (defaults to 20M). +
+ rootdirectory + + no + + This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. +
+ + ## auth auth: diff --git a/docs/storage-drivers/swift.md b/docs/storage-drivers/swift.md new file mode 100644 index 000000000..372cb6abc --- /dev/null +++ b/docs/storage-drivers/swift.md @@ -0,0 +1,139 @@ + + + +# OpenStack Swift storage driver + +An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. + +## Parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ authurl + +

URL for obtaining an auth token.

+
+ username + +

+ Your OpenStack user name.

+

+
+ password +

+

+

+ Your OpenStack password. +

+
+ container + +

+ The name of your Swift container where you wish to store the registry's objects. The driver creates the named container during its initialization; both the object manifests and their data segments are stored inside this single container. +

+
+ tenant + +

+ Optionally, your OpenStack tenant name. You can either use tenant or tenantid. +

+
+ tenantid + +

+ Optionally, your OpenStack tenant id. You can either use tenant or tenantid. +

+
+ domain + +

+ Optionally, your OpenStack domain name for Identity v3 API. You can either use domain or domainid. +

+
+ domainid + +

+ Optionally, your OpenStack domain id for Identity v3 API. You can either use domain or domainid. +

+
+ insecureskipverify + +

+ Optionally, set insecureskipverify to true to skip TLS verification for your OpenStack provider. The driver uses false by default. +

+
+ region + +

+ Optionally, specify the OpenStack region name in which you would like to store objects (for example fr). +

+
+ chunksize + +

+ Optionally, specify the segment size for Dynamic Large Object uploads (performed by WriteStream) to Swift. The default is 20 MB and the minimum is 1 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift. +

+
+ prefix + +

+ Optionally, supply the root directory tree in which to store all registry files. Defaults to the empty string, which is the container's root.

+

+
\ No newline at end of file diff --git a/docs/storagedrivers.md b/docs/storagedrivers.md index e574665a9..519961e18 100644 --- a/docs/storagedrivers.md +++ b/docs/storagedrivers.md @@ -23,6 +23,7 @@ This storage driver package comes bundled with several drivers: - [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket. - [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). - [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. +- [swift](storage-drivers/swift.md): A driver storing objects in [OpenStack Swift](http://docs.openstack.org/developer/swift/). ## Storage Driver API diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go new file mode 100644 index 000000000..0921ccc03 --- /dev/null +++ b/registry/storage/driver/swift/swift.go @@ -0,0 +1,657 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. +// +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// Since Swift has no concept of directories (directories are an abstraction), +// empty objects are created with the MIME type application/vnd.swift.directory. +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). +// Only one container is used for both manifests and data objects. Manifests +// are stored in the 'files' pseudo directory, data objects are stored under +// 'segments'. 
+package swift + +import ( + "bytes" + "crypto/rand" + "crypto/sha1" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + gopath "path" + "strconv" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/ncw/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" +) + +const driverName = "swift" + +// defaultChunkSize defines the default size of a segment +const defaultChunkSize = 20 * 1024 * 1024 + +// minChunkSize defines the minimum size of a segment +const minChunkSize = 1 << 20 + +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + Region string + Container string + Prefix string + InsecureSkipVerify bool + ChunkSize int +} + +type swiftInfo map[string]interface{} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + ChunkSize int +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params := Parameters{ + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, + } + + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params Parameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + + ct := swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + UserAgent: "distribution/" + version.Version, + Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + Transport: transport, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + BulkDeleteSupport: detectBulkDelete(params.AuthURL), + ChunkSize: params.ChunkSize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
+func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { + var ( + segments []swift.Object + multi io.Reader + paddingReader io.Reader + currentLength int64 + cursor int64 + segmentPath string + ) + + partNumber := 1 + chunkSize := int64(d.ChunkSize) + zeroBuf := make([]byte, d.ChunkSize) + + getSegment := func() string { + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) + } + + max := func(a int64, b int64) int64 { + if a > b { + return a + } + return b + } + + createManifest := true + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + manifest, ok := headers["X-Object-Manifest"] + if !ok { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { + return 0, err + } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { + return 0, err + } + segments = append(segments, info) + } else { + _, segmentPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentPath); err != nil { + return 0, err + } + createManifest = false + } + currentLength = info.Bytes + } else if err == swift.ObjectNotFound { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { + return 0, err + } + } else { + return 0, err + } + + if createManifest { + if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { + return 0, err + } + } + + // First, we skip the existing segments that are not modified by this call + for i := range segments { + if offset < cursor+segments[i].Bytes { + break + } + cursor += segments[i].Bytes + partNumber++ + } + + // We reached the end of the file but we haven't reached 'offset' yet + // Therefore we add blocks of zeros + if offset >= currentLength { + for offset-currentLength >= chunkSize { + // Insert a block a zero + _, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ObjectNotFound { + return 0, storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err + } + currentLength += chunkSize + partNumber++ + } + + cursor = currentLength + paddingReader = bytes.NewReader(zeroBuf) + } else if offset-cursor > 0 { + // Offset is inside the current segment : we need to read the + // data from the beginning of the segment to offset + file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) + if err != nil { + if err == swift.ObjectNotFound { + return 0, 
storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err + } + defer file.Close() + paddingReader = file + } + + readers := []io.Reader{} + if paddingReader != nil { + readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) + } + readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) + multi = io.MultiReader(readers...) + + writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { + currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + } + return false, bytesRead, err + } + + n, err := io.Copy(currentSegment, multi) + if err != nil { + return false, bytesRead, err + } + + if n > 0 { + defer currentSegment.Close() + bytesRead += n - max(0, offset-cursor) + } + + if n < chunkSize { + // We wrote all the data + if cursor+n < currentLength { + // Copy the end of the chunk + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + _, copyErr := io.Copy(currentSegment, file) + + if err := file.Close(); err != nil { + if err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err + } + + if copyErr != nil { + return false, bytesRead, copyErr + } + } + + return true, bytesRead, nil + } + + multi = io.LimitReader(reader, chunkSize) + cursor += chunkSize + partNumber++ + + return false, bytesRead, nil + } + + finished := false + read := int64(0) + bytesRead := int64(0) + for finished == false { + finished, read, err = writeSegment(getSegment()) + bytesRead += read + if err != nil { + return bytesRead, err + } + } + + return bytesRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), + } + + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // On Swift 1.12, the 'bytes' field is always 0 + // so we need to do a second HEAD request + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + } + + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// List returns a list of the objects that are direct descendants of the given path. 
+func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Prefix: prefix, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) + for _, obj := range objects { + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) + } + + if err == swift.ContainerNotFound { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path) + "/", + } + + objects, err := d.Conn.ObjectsAll(d.Container, &opts) + if err != nil { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + + if d.BulkDeleteSupport { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } + + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + segContainer, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) + if err != nil { + return err + } + + for _, s := range segments { + if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: s.Name} + } + return err + } + } + } + } else { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } + + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err + } + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return segments, err +} + +func (d *driver) createManifest(path string, segments string) error { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil +} + +func detectBulkDelete(authURL string) (bulkDelete bool) { + resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") + if err == nil { + defer resp.Body.Close() + decoder := json.NewDecoder(resp.Body) + var infos swiftInfo + if decoder.Decode(&infos) == nil { + _, bulkDelete = infos["bulk_delete"] + } + } + return +} + +func parseManifest(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go new file mode 100644 index 000000000..6be2238a5 --- /dev/null +++ b/registry/storage/driver/swift/swift_test.go @@ -0,0 +1,135 @@ +package swift + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/ncw/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +var swiftDriverConstructor func(prefix string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + container string + region string + insecureSkipVerify bool + swiftServer *swifttest.SwiftServer + err error + ) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + prefix, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(prefix) + + swiftDriverConstructor = func(root string) (*Driver, error) { + parameters := Parameters{ + username, + password, + authURL, + tenant, + tenantID, + domain, + domainID, + region, + container, + root, + insecureSkipVerify, + defaultChunkSize, + } + + return New(parameters) + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(prefix) + } + + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) +} + +func TestEmptyRootList(t *testing.T) { + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +}
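
The test suite above falls back to the bundled `swifttest` in-memory server when no `SWIFT_*` environment variables are set. Below is a minimal standalone sketch of that same pattern, useful for trying the new driver outside of `go test`; the container name and object path are illustrative placeholders rather than values taken from this change.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ncw/swift/swifttest"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/swift"
)

func main() {
	// Start the in-process Swift test server, as swift_test.go does when
	// SWIFT_USERNAME/SWIFT_PASSWORD/SWIFT_AUTH_URL/SWIFT_CONTAINER_NAME are unset.
	server, err := swifttest.NewSwiftServer("localhost")
	if err != nil {
		log.Fatal(err)
	}
	defer server.Close()

	// The map keys mirror the `swift:` section of the registry configuration;
	// FromParameters decodes them into the driver's Parameters struct.
	driver, err := swift.FromParameters(map[string]interface{}{
		"username":  "swifttest",
		"password":  "swifttest",
		"authurl":   server.AuthURL,
		"container": "registry", // illustrative container name
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	if err := driver.PutContent(ctx, "/test/hello", []byte("world")); err != nil {
		log.Fatal(err)
	}
	content, err := driver.GetContent(ctx, "/test/hello")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", content) // prints "world"
}
```

This is the same construction path the registry binary takes when it reads the `swift` storage section from its YAML configuration and hands the resulting parameter map to the registered factory.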