diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 00000000..8749a75b
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,95 @@
+{
+ "ImportPath": "github.com/docker/distribution",
+ "GoVersion": "go1.4",
+ "Packages": [
+ "./..."
+ ],
+ "Deps": [
+ {
+ "ImportPath": "code.google.com/p/go-uuid/uuid",
+ "Comment": "null-12",
+ "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
+ },
+ {
+ "ImportPath": "github.com/Sirupsen/logrus",
+ "Comment": "v0.6.1-8-gcc09837",
+ "Rev": "cc09837bcd512ffe6bb2e3f635bed138c4cd6bc8"
+ },
+ {
+ "ImportPath": "github.com/bugsnag/bugsnag-go",
+ "Comment": "v1.0.2-5-gb1d1530",
+ "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274"
+ },
+ {
+ "ImportPath": "github.com/bugsnag/osext",
+ "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
+ },
+ {
+ "ImportPath": "github.com/bugsnag/panicwrap",
+ "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27"
+ },
+ {
+ "ImportPath": "github.com/crowdmob/goamz/aws",
+ "Rev": "cd22d9897beff6f3de22cec4bdb7d46b9e2dee67"
+ },
+ {
+ "ImportPath": "github.com/crowdmob/goamz/cloudfront",
+ "Rev": "cd22d9897beff6f3de22cec4bdb7d46b9e2dee67"
+ },
+ {
+ "ImportPath": "github.com/crowdmob/goamz/s3",
+ "Rev": "cd22d9897beff6f3de22cec4bdb7d46b9e2dee67"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/tarsum",
+ "Comment": "v1.4.1-330-g3fbf723",
+ "Rev": "3fbf723e81fa2696daa95847ccdcacddba6484da"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
+ "Comment": "v1.4.1-330-g3fbf723",
+ "Rev": "3fbf723e81fa2696daa95847ccdcacddba6484da"
+ },
+ {
+ "ImportPath": "github.com/docker/libtrust",
+ "Rev": "a9625ce37e2dc5fed2e51eec2d39c39e4ac4c1df"
+ },
+ {
+ "ImportPath": "github.com/gorilla/context",
+ "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a"
+ },
+ {
+ "ImportPath": "github.com/gorilla/handlers",
+ "Rev": "0e84b7d810c16aed432217e330206be156bafae0"
+ },
+ {
+ "ImportPath": "github.com/gorilla/mux",
+ "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"
+ },
+ {
+ "ImportPath": "github.com/yvasiyarov/go-metrics",
+ "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e"
+ },
+ {
+ "ImportPath": "github.com/yvasiyarov/gorelic",
+ "Comment": "v0.0.6-8-ga9bba5b",
+ "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128"
+ },
+ {
+ "ImportPath": "github.com/yvasiyarov/newrelic_platform_go",
+ "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6"
+ },
+ {
+ "ImportPath": "gopkg.in/BrianBland/yaml.v2",
+ "Rev": "3e92d6a11b92fa4612d66712704844bdc0c48aed"
+ },
+ {
+ "ImportPath": "gopkg.in/check.v1",
+ "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
+ },
+ {
+ "ImportPath": "gopkg.in/yaml.v2",
+ "Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"
+ }
+ ]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 00000000..4cdaa53d
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
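+
+For reference, a tree like this is typically produced with "godep save ./..."
+(the "./..." matching the Packages list in Godeps.json) and re-synced into a
+GOPATH with "godep restore"; see the godep documentation above for the
+current workflow.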
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
new file mode 100644
index 00000000..f037d684
--- /dev/null
+++ b/Godeps/_workspace/.gitignore
@@ -0,0 +1,2 @@
+/pkg
+/bin
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
new file mode 100644
index 00000000..ab6b011a
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
new file mode 100644
index 00000000..50a0f2d0
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
new file mode 100644
index 00000000..d8bd013e
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
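+//
+// A short usage sketch (every identifier used below is defined in this
+// package):
+//
+//	u := uuid.NewRandom() // random (Version 4) UUID
+//	s := u.String()       // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form
+//	if uuid.Parse(s) == nil {
+//		// s did not round-trip; should not happen for a valid UUID
+//	}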
+package uuid
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
new file mode 100644
index 00000000..cdd4192f
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write(data)
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
new file mode 100644
index 00000000..dd0a8ac1
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "net"
+
+var (
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware address for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
new file mode 100644
index 00000000..b9369c20
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ mu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// adjusts the clock sequence as needed. An error is returned if the current
+// time cannot be determined.
+func GetTime() (Time, error) {
+ defer mu.Unlock()
+ mu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer mu.Unlock()
+ mu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer mu.Unlock()
+ mu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
new file mode 100644
index 00000000..de40b102
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255.
+var xvalues = []byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
new file mode 100644
index 00000000..2920fae6
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
@@ -0,0 +1,163 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ uuid := make([]byte, 16)
+ for i, x := range []int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if uuid == nil || len(uuid) != 16 {
+ return ""
+ }
+ b := []byte(uuid)
+ return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
+ b[:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+ panic("unreachable")
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
new file mode 100644
index 00000000..417ebeb2
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
@@ -0,0 +1,390 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+)
+
+type test struct {
+ in string
+ version Version
+ variant Variant
+ isuuid bool
+}
+
+var tests = []test{
+ {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
+ {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
+ {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
+ {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
+ {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+ {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
+ {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
+ {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
+ {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
+ {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
+ {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
+ {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
+ {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
+ {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
+ {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
+ {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
+
+ {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+ {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
+ {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
+ {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
+ {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
+ {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
+ {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
+ {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
+ {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
+ {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
+
+ {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
+ {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
+ {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
+ {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
+ {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
+ {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
+}
+
+var constants = []struct {
+ c interface{}
+ name string
+}{
+ {Person, "Person"},
+ {Group, "Group"},
+ {Org, "Org"},
+ {Invalid, "Invalid"},
+ {RFC4122, "RFC4122"},
+ {Reserved, "Reserved"},
+ {Microsoft, "Microsoft"},
+ {Future, "Future"},
+ {Domain(17), "Domain17"},
+ {Variant(42), "BadVariant42"},
+}
+
+func testTest(t *testing.T, in string, tt test) {
+ uuid := Parse(in)
+ if ok := (uuid != nil); ok != tt.isuuid {
+ t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid)
+ }
+ if uuid == nil {
+ return
+ }
+
+ if v := uuid.Variant(); v != tt.variant {
+ t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant)
+ }
+ if v, _ := uuid.Version(); v != tt.version {
+ t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version)
+ }
+}
+
+func TestUUID(t *testing.T) {
+ for _, tt := range tests {
+ testTest(t, tt.in, tt)
+ testTest(t, strings.ToUpper(tt.in), tt)
+ }
+}
+
+func TestConstants(t *testing.T) {
+ for x, tt := range constants {
+ v, ok := tt.c.(fmt.Stringer)
+ if !ok {
+ t.Errorf("%x: %v: not a stringer", x, v)
+ } else if s := v.String(); s != tt.name {
+ t.Errorf("%x: Constant %T:%v gives %q, expected %q\n", x, tt.c, tt.c, s, tt.name)
+ }
+ }
+}
+
+func TestRandomUUID(t *testing.T) {
+ m := make(map[string]bool)
+ for x := 1; x < 32; x++ {
+ uuid := NewRandom()
+ s := uuid.String()
+ if m[s] {
+ t.Errorf("NewRandom returned duplicated UUID %s\n", s)
+ }
+ m[s] = true
+ if v, _ := uuid.Version(); v != 4 {
+ t.Errorf("Random UUID of version %s\n", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+ }
+ }
+}
+
+func TestNew(t *testing.T) {
+ m := make(map[string]bool)
+ for x := 1; x < 32; x++ {
+ s := New()
+ if m[s] {
+ t.Errorf("New returned duplicated UUID %s\n", s)
+ }
+ m[s] = true
+ uuid := Parse(s)
+ if uuid == nil {
+ t.Errorf("New returned %q which does not decode\n", s)
+ continue
+ }
+ if v, _ := uuid.Version(); v != 4 {
+ t.Errorf("Random UUID of version %s\n", v)
+ }
+ if uuid.Variant() != RFC4122 {
+ t.Errorf("Random UUID is variant %d\n", uuid.Variant())
+ }
+ }
+}
+
+func clockSeq(t *testing.T, uuid UUID) int {
+ seq, ok := uuid.ClockSequence()
+ if !ok {
+ t.Fatalf("%s: invalid clock sequence\n", uuid)
+ }
+ return seq
+}
+
+func TestClockSeq(t *testing.T) {
+ // Fake time.Now for this test to return a monotonically advancing time; restore it at end.
+ defer func(orig func() time.Time) { timeNow = orig }(timeNow)
+ monTime := time.Now()
+ timeNow = func() time.Time {
+ monTime = monTime.Add(1 * time.Second)
+ return monTime
+ }
+
+ SetClockSequence(-1)
+ uuid1 := NewUUID()
+ uuid2 := NewUUID()
+
+ if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
+ t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
+ }
+
+ SetClockSequence(-1)
+ uuid2 = NewUUID()
+
+ // Just on the very off chance we generated the same sequence
+ // two times we try again.
+ if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+ SetClockSequence(-1)
+ uuid2 = NewUUID()
+ }
+ if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
+ t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
+ }
+
+ SetClockSequence(0x1234)
+ uuid1 = NewUUID()
+ if seq := clockSeq(t, uuid1); seq != 0x1234 {
+ t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
+ }
+}
+
+func TestCoding(t *testing.T) {
+ text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
+ urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
+ data := UUID{
+ 0x7d, 0x44, 0x48, 0x40,
+ 0x9d, 0xc0,
+ 0x11, 0xd1,
+ 0xb2, 0x45,
+ 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
+ }
+ if v := data.String(); v != text {
+ t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
+ }
+ if v := data.URN(); v != urn {
+ t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
+ }
+
+ uuid := Parse(text)
+ if !Equal(uuid, data) {
+ t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
+ }
+}
+
+func TestVersion1(t *testing.T) {
+ uuid1 := NewUUID()
+ uuid2 := NewUUID()
+
+ if Equal(uuid1, uuid2) {
+ t.Errorf("%s:duplicate uuid\n", uuid1)
+ }
+ if v, _ := uuid1.Version(); v != 1 {
+ t.Errorf("%s: version %s expected 1\n", uuid1, v)
+ }
+ if v, _ := uuid2.Version(); v != 1 {
+ t.Errorf("%s: version %s expected 1\n", uuid2, v)
+ }
+ n1 := uuid1.NodeID()
+ n2 := uuid2.NodeID()
+ if !bytes.Equal(n1, n2) {
+ t.Errorf("Different nodes %x != %x\n", n1, n2)
+ }
+ t1, ok := uuid1.Time()
+ if !ok {
+ t.Errorf("%s: invalid time\n", uuid1)
+ }
+ t2, ok := uuid2.Time()
+ if !ok {
+ t.Errorf("%s: invalid time\n", uuid2)
+ }
+ q1, ok := uuid1.ClockSequence()
+ if !ok {
+ t.Errorf("%s: invalid clock sequence\n", uuid1)
+ }
+ q2, ok := uuid2.ClockSequence()
+ if !ok {
+ t.Errorf("%s: invalid clock sequence", uuid2)
+ }
+
+ switch {
+ case t1 == t2 && q1 == q2:
+ t.Errorf("time stopped\n")
+ case t1 > t2 && q1 == q2:
+ t.Errorf("time reversed\n")
+ case t1 < t2 && q1 != q2:
+ t.Errorf("clock sequence chaned unexpectedly\n")
+ }
+}
+
+func TestNodeAndTime(t *testing.T) {
+ // Time is February 5, 1998 12:30:23.136364800 AM GMT
+
+ uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
+ node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
+
+ ts, ok := uuid.Time()
+ if ok {
+ c := time.Unix(ts.UnixTime())
+ want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
+ if !c.Equal(want) {
+ t.Errorf("Got time %v, want %v", c, want)
+ }
+ } else {
+ t.Errorf("%s: bad time\n", uuid)
+ }
+ if !bytes.Equal(node, uuid.NodeID()) {
+ t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
+ }
+}
+
+func TestMD5(t *testing.T) {
+ uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
+ want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
+ if uuid != want {
+ t.Errorf("MD5: got %q expected %q\n", uuid, want)
+ }
+}
+
+func TestSHA1(t *testing.T) {
+ uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
+ want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
+ if uuid != want {
+ t.Errorf("SHA1: got %q expected %q\n", uuid, want)
+ }
+}
+
+func TestNodeID(t *testing.T) {
+ nid := []byte{1, 2, 3, 4, 5, 6}
+ SetNodeInterface("")
+ s := NodeInterface()
+ if s == "" || s == "user" {
+ t.Errorf("NodeInterface %q after SetInteface\n", s)
+ }
+ node1 := NodeID()
+ if node1 == nil {
+ t.Errorf("NodeID nil after SetNodeInterface\n", s)
+ }
+ SetNodeID(nid)
+ s = NodeInterface()
+ if s != "user" {
+ t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
+ }
+ node2 := NodeID()
+ if node2 == nil {
+ t.Errorf("NodeID nil after SetNodeID\n", s)
+ }
+ if bytes.Equal(node1, node2) {
+ t.Errorf("NodeID not changed after SetNodeID\n", s)
+ } else if !bytes.Equal(nid, node2) {
+ t.Errorf("NodeID is %x, expected %x\n", node2, nid)
+ }
+}
+
+func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
+ if uuid == nil {
+ t.Errorf("%s failed\n", name)
+ return
+ }
+ if v, _ := uuid.Version(); v != 2 {
+ t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
+ return
+ }
+ if v, ok := uuid.Domain(); !ok || v != domain {
+ if !ok {
+ t.Errorf("%s: %d: Domain failed\n", name, uuid)
+ } else {
+ t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
+ }
+ }
+ if v, ok := uuid.Id(); !ok || v != id {
+ if !ok {
+ t.Errorf("%s: %d: Id failed\n", name, uuid)
+ } else {
+ t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
+ }
+ }
+}
+
+func TestDCE(t *testing.T) {
+ testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
+ testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
+ testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
+}
+
+type badRand struct{}
+
+func (r badRand) Read(buf []byte) (int, error) {
+ for i := range buf {
+ buf[i] = byte(i)
+ }
+ return len(buf), nil
+}
+
+func TestBadRand(t *testing.T) {
+ SetRand(badRand{})
+ uuid1 := New()
+ uuid2 := New()
+ if uuid1 != uuid2 {
+ t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2)
+ }
+ SetRand(nil)
+ uuid1 = New()
+ uuid2 = New()
+ if uuid1 == uuid2 {
+ t.Errorf("unexecpted duplicates, got %q\n", uuid1)
+ }
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
new file mode 100644
index 00000000..63580044
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], clock_seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
new file mode 100644
index 00000000..b3d4a368
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10^-11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits(uuid)
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
new file mode 100644
index 00000000..66be63a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
@@ -0,0 +1 @@
+logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
new file mode 100644
index 00000000..c3af3ce2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - 1.2
+ - 1.3
+ - tip
+install:
+ - go get github.com/stretchr/testify
+ - go get github.com/stvp/go-udp-testing
+ - go get github.com/tobi/airbrake-go
+ - go get github.com/getsentry/raven-go
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 00000000..f090cb42
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 00000000..cabd027a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,349 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0); the core API is unlikely to change much, but please pin
+your Logrus version to make sure you aren't fetching latest `master` on every
+build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[l2met](http://r.32k.io/l2met-introduction) format:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
+time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
+time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
+time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
+time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(&logrus_airbrake.AirbrakeHook{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d", event, topic, key)`, you should log
+the much more discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+```go
+// Not the real implementation of the Airbrake hook. Just a simple sample.
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ log.AddHook(new(AirbrakeHook))
+}
+
+type AirbrakeHook struct{}
+
+// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
+// the fields for the entry. See the Fields section of the README.
+func (hook *AirbrakeHook) Fire(entry *log.Entry) error {
+ err := airbrake.Notify(entry.Data["error"].(error))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Info("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+// `Levels()` returns a slice of `Levels` the hook is fired for.
+func (hook *AirbrakeHook) Levels() []log.Level {
+ return []log.Level{
+ log.ErrorLevel,
+ log.FatalLevel,
+ log.PanicLevel,
+ }
+}
+```
+
+Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
+ Send errors to an exception tracking service compatible with the Airbrake API.
+ Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+
+* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
+ Send errors to the Papertrail hosted logging service via UDP.
+
+* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
+ Send errors to remote syslog server.
+ Uses standard library `log/syslog` behind the scenes.
+
+* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
+ Send errors to a channel in hipchat.
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
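+
+A minimal sketch of that idea (the `APP_DEBUG` environment variable is a
+hypothetical example; hook this up to however your application detects its
+environment):
+
+```go
+package main
+
+import (
+ "os"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Be more verbose only when APP_DEBUG is set.
+ if os.Getenv("APP_DEBUG") != "" {
+  log.SetLevel(log.DebugLevel)
+ } else {
+  log.SetLevel(log.InfoLevel)
+ }
+}
+```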
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events (see the sample line after this list):
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
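+
+For instance (an illustrative sample; the timestamp and the `event` field are
+made up), `log.WithField("event", "reconnect").Info("Failed to send event.")`
+would emit, with the default `TextFormatter`:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="Failed to send event." event="reconnect"
+```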
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment,
+you could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 00000000..e164eecb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,248 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// An Entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+func (entry *Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that would only make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// sprintlnn => Sprintln, no trailing newline. This gives the behavior of
+// fmt.Sprintln, where spaces are always added between operands regardless of
+// their type, but without the trailing newline. Instead of vendoring the
+// Sprintln implementation to spare a string allocation, we do the simplest
+// thing: format with Sprintln and strip the final newline.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
new file mode 100644
index 00000000..98717df4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 00000000..a62ba45d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 00000000..42e7a4c9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/tobi/airbrake-go"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
+}
+
+func main() {
+ airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
+ airbrake.ApiKey = "whatever"
+ airbrake.Environment = "production"
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 00000000..d0871244
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,182 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the default package-level logger, named after the standard
+ // logger in the stdlib `log` package.
+ std = New()
+)
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 00000000..74c49a0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,44 @@
+package logrus
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return a slice of bytes which is then
+// logged to `logger.Out`.
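+//
+// Example (an illustrative sketch, not part of logrus): a Formatter that
+// emits only the message text.
+//
+//    type msgOnlyFormatter struct{}
+//
+//    func (f msgOnlyFormatter) Format(e *Entry) ([]byte, error) {
+//        return append([]byte(e.Message), '\n'), nil
+//    }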
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// prefixFieldClashes keeps user-supplied `time`, `msg` and `level` fields
+// from being silently overwritten when dumping an entry. Without it,
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would silently drop the user-provided level. With it, the entry is
+// logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it still uses Data in an opinionated way; it
+// exists to avoid code duplication between the two default formatters.
+func prefixFieldClashes(entry *Entry) {
+ _, ok := entry.Data["time"]
+ if ok {
+ entry.Data["fields.time"] = entry.Data["time"]
+ }
+
+ _, ok = entry.Data["msg"]
+ if ok {
+ entry.Data["fields.msg"] = entry.Data["msg"]
+ }
+
+ _, ok = entry.Data["level"]
+ if ok {
+ entry.Data["fields.level"] = entry.Data["level"]
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 00000000..77989da6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,88 @@
+package logrus
+
+import (
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
new file mode 100644
index 00000000..13f34cb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 00000000..0da2b365
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging at any of the levels returned from
+// `Levels()` on your implementation of the interface. Note that hooks are
+// not fired in a goroutine or through a channel with workers; if a hook may
+// block and you don't want logging calls at its levels to block with it, you
+// should handle that concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
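+
+// Example (an illustrative sketch, not part of logrus): a minimal Hook
+// implementation that counts how many entries fired it.
+//
+//    type countingHook struct{ count int }
+//
+//    func (h *countingHook) Levels() []Level { return []Level{InfoLevel} }
+//
+//    func (h *countingHook) Fire(e *Entry) error {
+//        h.count++
+//        return nil
+//    }
+//
+// Register it with `log.Hooks.Add(&countingHook{})`.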
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
new file mode 100644
index 00000000..880d21ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
@@ -0,0 +1,54 @@
+package logrus_airbrake
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/tobi/airbrake-go"
+)
+
+// AirbrakeHook to send exceptions to an exception-tracking service compatible
+// with the Airbrake API. You must set the following before using this hook:
+// * airbrake.Endpoint
+// * airbrake.ApiKey
+// * airbrake.Environment (only sends exceptions when set to "production")
+//
+// Entries that trigger an Error, Fatal or Panic should include an "error"
+// field, which is sent to Airbrake.
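+//
+// For example (illustrative):
+//
+//    log.WithField("error", err).Error("failed to process payment")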
+type AirbrakeHook struct{}
+
+func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
+ if entry.Data["error"] == nil {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
+ return nil
+ }
+
+ err, ok := entry.Data["error"].(error)
+ if !ok {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
+ return nil
+ }
+
+ airErr := airbrake.Notify(err)
+ if airErr != nil {
+ entry.Logger.WithFields(logrus.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ "error": airErr,
+ }).Warn("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+func (hook *AirbrakeHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
new file mode 100644
index 00000000..ae61e922
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
@@ -0,0 +1,28 @@
+# Papertrail Hook for Logrus
+
+[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
+
+In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
+
+## Usage
+
+You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
+
+For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/papertrail"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
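+
+Once the hook is added, ordinary logrus calls on that logger are forwarded to
+Papertrail. For example (the field name here is illustrative):
+
+```go
+log.WithFields(logrus.Fields{
+    "request_id": "abc123",
+}).Error("request failed")
+```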
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
new file mode 100644
index 00000000..12c56f29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
@@ -0,0 +1,54 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ format = "Jan 2 15:04:05"
+)
+
+// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
+type PapertrailHook struct {
+ Host string
+ Port int
+ AppName string
+ UDPConn net.Conn
+}
+
+// NewPapertrailHook creates a hook to be added to an instance of logger.
+func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
+ conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
+ return &PapertrailHook{host, port, appName, conn}, err
+}
+
+// Fire is called when a log event is fired.
+func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
+ date := time.Now().Format(format)
+ payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Level, entry.Message)
+
+ bytesWritten, err := hook.UDPConn.Write([]byte(payload))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
+ return err
+ }
+
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *PapertrailHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
new file mode 100644
index 00000000..96318d00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
@@ -0,0 +1,26 @@
+package logrus_papertrail
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stvp/go-udp-testing"
+)
+
+func TestWritingToUDP(t *testing.T) {
+ port := 16661
+ udp.SetAddr(fmt.Sprintf(":%d", port))
+
+ hook, err := NewPapertrailHook("localhost", port, "test")
+ if err != nil {
+ t.Errorf("Unable to connect to local UDP server.")
+ }
+
+ log := logrus.New()
+ log.Hooks.Add(hook)
+
+ udp.ShouldReceive(t, "foo", func() {
+ log.Info("foo")
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
new file mode 100644
index 00000000..a409f3b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
@@ -0,0 +1,61 @@
+# Sentry Hook for Logrus
+
+[Sentry](https://getsentry.com) provides both self-hosted and hosted
+solutions for exception tracking.
+Both client and server are
+[open source](https://github.com/getsentry/sentry).
+
+## Usage
+
+Every sentry application defined on the server gets a different
+[DSN](https://www.getsentry.com/docs/). In the example below replace
+`YOUR_DSN` with the one created for your application.
+
+```go
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/sentry"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ })
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+## Special fields
+
+Some logrus fields have a special meaning in this hook: `server_name` and
+`logger`. When logs are sent to Sentry, these fields are treated differently.
+
+- `server_name` (also known as hostname) is the name of the server that is
+  logging the event (e.g. `hostname.example.com`).
+- `logger` is the part of the application that is logging the event; in Go
+  this usually means setting it to the name of the package.
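+
+For example (the values here are illustrative), both are set like any other
+logrus field:
+
+```go
+log.WithFields(logrus.Fields{
+    "server_name": "hostname.example.com",
+    "logger":      "myapp/api",
+}).Error("something failed")
+```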
+
+## Timeout
+
+`Timeout` is the time the Sentry hook will wait for a response
+from the Sentry server. If this time elapses with no response from
+the server, an error is returned.
+
+If `Timeout` is set to 0, the SentryHook will not wait for a reply
+and will assume the message was delivered correctly.
+
+The SentryHook has a default timeout of 100 milliseconds when created
+with a call to `NewSentryHook`. This can be changed by assigning a value
+to the `Timeout` field:
+
+```go
+hook, _ := logrus_sentry.NewSentryHook(...)
+hook.Timeout = 20 * time.Second
+```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
new file mode 100644
index 00000000..379f281c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
@@ -0,0 +1,100 @@
+package logrus_sentry
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+var (
+ severityMap = map[logrus.Level]raven.Severity{
+ logrus.DebugLevel: raven.DEBUG,
+ logrus.InfoLevel: raven.INFO,
+ logrus.WarnLevel: raven.WARNING,
+ logrus.ErrorLevel: raven.ERROR,
+ logrus.FatalLevel: raven.FATAL,
+ logrus.PanicLevel: raven.FATAL,
+ }
+)
+
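+// getAndDel returns the string value stored under key in d and deletes it,
+// reporting whether a string value was present.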
+func getAndDel(d logrus.Fields, key string) (string, bool) {
+ var (
+ ok bool
+ v interface{}
+ val string
+ )
+ if v, ok = d[key]; !ok {
+ return "", false
+ }
+
+ if val, ok = v.(string); !ok {
+ return "", false
+ }
+ delete(d, key)
+ return val, true
+}
+
+// SentryHook delivers logs to a sentry server.
+type SentryHook struct {
+ // Timeout sets the time to wait for a delivery error from the Sentry server.
+ // If this is set to zero, the hook will not wait for any response and will
+ // consider the message correctly sent.
+ Timeout time.Duration
+
+ client *raven.Client
+ levels []logrus.Level
+}
+
+// NewSentryHook creates a hook to be added to an instance of logger
+// and initializes the raven client.
+// This method sets the timeout to 100 milliseconds.
+func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
+ client, err := raven.NewClient(DSN, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &SentryHook{100 * time.Millisecond, client, levels}, nil
+}
+
+// Fire is called when an event should be sent to Sentry.
+// Special fields that Sentry uses to give more information to the server
+// are extracted from entry.Data if they are found.
+// These fields are: logger and server_name.
+func (hook *SentryHook) Fire(entry *logrus.Entry) error {
+ packet := &raven.Packet{
+ Message: entry.Message,
+ Timestamp: raven.Timestamp(entry.Time),
+ Level: severityMap[entry.Level],
+ Platform: "go",
+ }
+
+ d := entry.Data
+
+ if logger, ok := getAndDel(d, "logger"); ok {
+ packet.Logger = logger
+ }
+ if serverName, ok := getAndDel(d, "server_name"); ok {
+ packet.ServerName = serverName
+ }
+ packet.Extra = map[string]interface{}(d)
+
+ _, errCh := hook.client.Capture(packet, nil)
+ timeout := hook.Timeout
+ if timeout != 0 {
+ timeoutCh := time.After(timeout)
+ select {
+ case err := <-errCh:
+ return err
+ case <-timeoutCh:
+ return fmt.Errorf("no response from sentry server in %s", timeout)
+ }
+ }
+ return nil
+}
+
+// Levels returns the available logging levels.
+func (hook *SentryHook) Levels() []logrus.Level {
+ return hook.levels
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
new file mode 100644
index 00000000..45f18d17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
@@ -0,0 +1,97 @@
+package logrus_sentry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/getsentry/raven-go"
+)
+
+const (
+ message = "error message"
+ server_name = "testserver.internal"
+ logger_name = "test.logger"
+)
+
+func getTestLogger() *logrus.Logger {
+ l := logrus.New()
+ l.Out = ioutil.Discard
+ return l
+}
+
+func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
+ pch := make(chan *raven.Packet, 1)
+ s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ d := json.NewDecoder(req.Body)
+ p := &raven.Packet{}
+ err := d.Decode(p)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ pch <- p
+ }))
+ defer s.Close()
+
+ fragments := strings.SplitN(s.URL, "://", 2)
+ dsn := fmt.Sprintf(
+ "%s://public:secret@%s/sentry/project-id",
+ fragments[0],
+ fragments[1],
+ )
+ tf(dsn, pch)
+}
+
+func TestSpecialFields(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+ logger.WithFields(logrus.Fields{
+ "server_name": server_name,
+ "logger": logger_name,
+ }).Error(message)
+
+ packet := <-pch
+ if packet.Logger != logger_name {
+ t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
+ }
+
+ if packet.ServerName != server_name {
+ t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
+ }
+ })
+}
+
+func TestSentryHandler(t *testing.T) {
+ WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
+ logger := getTestLogger()
+ hook, err := NewSentryHook(dsn, []logrus.Level{
+ logrus.ErrorLevel,
+ })
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ logger.Hooks.Add(hook)
+
+ logger.Error(message)
+ packet := <-pch
+ if packet.Message != message {
+ t.Errorf("message should have been %s, was %s", message, packet.Message)
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 00000000..cd706bc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,20 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 00000000..b6fa3746
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,59 @@
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
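+// Fire formats the entry and writes it to syslog at a severity matching the
+// entry's level (Panic and Fatal map to Crit, Error to Err, and so on).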
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.PanicLevel,
+ logrus.FatalLevel,
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ logrus.InfoLevel,
+ logrus.DebugLevel,
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 00000000..42762dc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 00000000..9d11b642
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,22 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
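+// JSONFormatter serializes the entry's fields, together with the default
+// `time`, `msg` and `level` fields, as a single line of JSON.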
+type JSONFormatter struct{}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ prefixFieldClashes(entry)
+ entry.Data["time"] = entry.Time.Format(time.RFC3339)
+ entry.Data["msg"] = entry.Message
+ entry.Data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 00000000..7374fe36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it as the default, which is `os.Stdout`. You can also set
+ // this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+ // All log entries pass through the formatter before being logged to Out.
+ // The included formatters are `TextFormatter` and `JSONFormatter`, of which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but not when writing to a file. You can easily implement
+ // your own formatter that satisfies the `Formatter` interface; see the
+ // `README` or the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development and for verbose debugging.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.Debug,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// WithField adds a field to the log entry. Note that it doesn't log until you
+// call Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ NewEntry(logger).Debugf(format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ NewEntry(logger).Infof(format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ NewEntry(logger).Errorf(format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ NewEntry(logger).Fatalf(format, args...)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ NewEntry(logger).Panicf(format, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ NewEntry(logger).Debug(args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ NewEntry(logger).Error(args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ NewEntry(logger).Fatal(args...)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ NewEntry(logger).Panic(args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ NewEntry(logger).Debugln(args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ NewEntry(logger).Infoln(args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ NewEntry(logger).Errorln(args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ NewEntry(logger).Fatalln(args...)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ NewEntry(logger).Panicln(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 00000000..43ee12e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,94 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
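+// For example, both "warn" and "warning" parse to WarnLevel; an unrecognized
+// string returns an error.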
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var _ StdLogger = &log.Logger{}
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
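+//
+// For example (illustrative), a library function that accepts either logger:
+//
+//    func Frobnicate(log StdLogger) {
+//        log.Printf("frobnicating")
+//    }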
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
new file mode 100644
index 00000000..15157d17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
@@ -0,0 +1,247 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val, err := strconv.Unquote(kvArr[1])
+ assert.NoError(t, err)
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1)
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100644
index 00000000..8fe02a4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
new file mode 100644
index 00000000..0428ee5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
@@ -0,0 +1,20 @@
+/*
+ Go 1.2 doesn't include Termios for FreeBSD. It should be added in 1.3; once it is, this file could be merged with terminal_darwin.
+*/
+package logrus
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 00000000..a2c0b40d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000..276447bd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000..2e09f6f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..fc0a4082
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,95 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
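+// miniTS returns the number of whole seconds elapsed since the package was
+// initialized; it provides the compact timestamps used in colored output.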
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
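+ // Set to true to disable colored output even when attached to a TTY.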
+ DisableColors bool
+}
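+
+// For example (illustrative), colors can be forced even without a TTY:
+//
+//    log.Formatter = &TextFormatter{ForceColors: true}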
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+
+ var keys []string
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if isColored {
+ printColored(b, entry, keys)
+ } else {
+ f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+ switch value.(type) {
+ case string, error:
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ default:
+ fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml
new file mode 100644
index 00000000..a77773b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - tip
+
+install:
+ - go get github.com/bugsnag/panicwrap
+ - go get github.com/bugsnag/osext
+ - go get github.com/bitly/go-simplejson
+ - go get github.com/revel/revel
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt
new file mode 100644
index 00000000..3cb0ec0f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2014 Bugsnag
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md
new file mode 100644
index 00000000..b5432293
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/README.md
@@ -0,0 +1,489 @@
+Bugsnag Notifier for Golang
+===========================
+
+The Bugsnag Notifier for Golang gives you instant notification of panics or
+unexpected errors in your golang app. Any unhandled panic will trigger a
+notification to be sent to your Bugsnag project.
+
+[Bugsnag](http://bugsnag.com) captures errors in real-time from your web,
+mobile and desktop applications, helping you to understand and resolve them
+as fast as possible. [Create a free account](http://bugsnag.com) to start
+capturing exceptions from your applications.
+
+## How to Install
+
+1. Download the code
+
+ ```shell
+ go get github.com/bugsnag/bugsnag-go
+ ```
+
+### Using with net/http apps
+
+For a golang app based on [net/http](https://godoc.org/net/http), integrating
+Bugsnag takes two steps. You should also use these instructions if you're using
+the [gorilla toolkit](http://www.gorillatoolkit.org/), or the
+[pat](https://github.com/bmizerany/pat/) muxer.
+
+1. Configure bugsnag at the start of your `main()` function:
+
+ ```go
+ import "github.com/bugsnag/bugsnag-go"
+
+ func main() {
+ bugsnag.Configure(bugsnag.Configuration{
+ APIKey: "YOUR_API_KEY_HERE",
+ ReleaseStage: "production",
+ // more configuration options
+ })
+
+ // rest of your program.
+ }
+ ```
+
+2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler)
+
+ ```go
+ // a. If you're using the builtin http mux, you can just pass
+ // bugsnag.Handler(nil) to http.ListenAndServe
+ http.ListenAndServe(":8080", bugsnag.Handler(nil))
+
+ // b. If you're creating a server manually yourself, you can set
+ // its handlers the same way
+ srv := http.Server{
+ Handler: bugsnag.Handler(nil),
+ }
+
+ // c. If you're not using the builtin http mux, wrap your own handler
+ // (though make sure that it doesn't already catch panics)
+ http.ListenAndServe(":8080", bugsnag.Handler(handler))
+ ```
+
+### Using with Revel apps
+
+There are two steps to get panic handling in [revel](https://revel.github.io) apps.
+
+1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`:
+
+ ```go
+
+ import "github.com/bugsnag/bugsnag-go/revel"
+
+ revel.Filters = []revel.Filter{
+ revel.PanicFilter,
+ bugsnagrevel.Filter,
+ // ...
+ }
+ ```
+
+2. Set bugsnag.apikey in the top section of `conf/app.conf`.
+
+ ```
+ module.static=github.com/revel/revel/modules/static
+
+ bugsnag.apikey=YOUR_API_KEY_HERE
+
+ [dev]
+ ```
+
+### Using with Google App Engine
+
+1. Configure bugsnag at the start of your `init()` function:
+
+ ```go
+ import "github.com/bugsnag/bugsnag-go"
+
+ func init() {
+ bugsnag.Configure(bugsnag.Configuration{
+ APIKey: "YOUR_API_KEY_HERE",
+ })
+
+ // ...
+ }
+ ```
+
+2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag:
+
+ ```go
+ // a. If you're using HandlerFuncs
+ http.HandleFunc("/", bugsnag.HandlerFunc(
+ func (w http.ResponseWriter, r *http.Request) {
+ // ...
+ }))
+
+ // b. If you're using Handlers
+ http.Handle("/", bugsnag.Handler(myHttpHandler))
+ ```
+
+3. In order to use Bugsnag, you must provide the current
+[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or the
+current `*http.Request`, as rawData. The easiest way to do this is to create a new notifier.
+
+ ```go
+ c := appengine.NewContext(r)
+ notifier := bugsnag.New(c)
+
+ if err != nil {
+ notifier.Notify(err)
+ }
+
+ go func() {
+ defer notifier.Recover()
+
+ // ...
+ }()
+ ```
+
+
+## Notifying Bugsnag manually
+
+Bugsnag will automatically handle any panics that crash your program and notify
+you of them. If you've integrated with `revel` or `net/http`, then you'll also
+be notified of any panics that happen while processing a request.
+
+Sometimes, however, it's useful to manually notify Bugsnag of a problem. To do this,
+call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify)
+
+```go
+if err != nil {
+ bugsnag.Notify(err)
+}
+```
+
+### Manual panic handling
+
+To avoid a panic in a goroutine from crashing your entire app, you can use
+[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover)
+to stop a panic from unwinding the stack any further. When `Recover()` is hit,
+it will send any current panic to Bugsnag and then stop panicking. This is
+most useful at the start of a goroutine:
+
+```go
+go func() {
+ defer bugsnag.Recover()
+
+ // ...
+}()
+```
+
+Alternatively you can use
+[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#AutoNotify)
+to notify Bugsnag of a panic while letting the program continue to panic. This
+is useful if you're using a framework that already has some panic handling
+and you are retrofitting Bugsnag support.
+
+```go
+defer bugsnag.AutoNotify()
+```
+
+## Sending Custom Data
+
+Most functions in the Bugsnag API, including `bugsnag.Notify()`,
+`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you
+attach data to the notifications that they send. To do this you pass in rawData,
+which can be any of the supported types listed below. To add support for more
+types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify).
+
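+For instance, several of the types described below can be combined in a single
+call. A minimal sketch (the values are purely illustrative):
+
+```go
+bugsnag.Notify(err,
+	bugsnag.SeverityWarning,
+	bugsnag.Context{"billing"},
+	bugsnag.User{Id: "42"},
+	bugsnag.MetaData{"Invoice": {"Number": "INV-7"}},
+)
+```
+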
+### Custom MetaData
+
+Custom metaData appears as tabs on Bugsnag.com. You can set it by passing
+a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData)
+object as rawData.
+
+```go
+bugsnag.Notify(err,
+ bugsnag.MetaData{
+ "Account": {
+ "Name": Account.Name,
+ "Paying": Account.Plan.Premium,
+ },
+ })
+```
+
+### Request data
+
+Bugsnag can extract interesting data from
+[`*http.Request`](https://godoc.org/net/http/#Request) objects, and
+[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller)
+objects. These are automatically passed in when handling panics, and you can
+pass them yourself.
+
+```go
+func handleRequest(w http.ResponseWriter, r *http.Request) {
+	bugsnag.Notify(err, r)
+}
+```
+
+### User data
+
+User data is searchable, and the `Id` powers the count of users affected. You
+can set which user an error affects by passing a
+[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as
+rawData.
+
+```go
+bugsnag.Notify(err,
+ bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"})
+```
+
+### Context
+
+The context shows up prominently in the list view so that you can get an idea
+of where a problem occurred. You can set it by passing a
+[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context)
+object as rawData.
+
+```go
+bugsnag.Notify(err, bugsnag.Context{"backgroundJob"})
+```
+
+### Severity
+
+Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`.
+You can set the severity of an error by passing one of these objects as rawData.
+
+```go
+bugsnag.Notify(err, bugsnag.SeverityInfo)
+```
+
+## Configuration
+
+You must call `bugsnag.Configure()` at the start of your program to use Bugsnag. You pass it
+a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object
+containing any of the following values.
+
+### APIKey
+
+The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings".
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ APIKey: "YOUR_API_KEY_HERE",
+})
+```
+
+### Endpoint
+
+The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag Enterprise,
+you should set this to the endpoint of your local instance.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ Endpoint: "http://bugsnag.internal:49000/",
+})
+```
+
+### ReleaseStage
+
+The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`,
+`development` or similar as appropriate.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ ReleaseStage: "development",
+})
+```
+
+### NotifyReleaseStages
+
+The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but
+you can use this to silence development errors.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ NotifyReleaseStages: []string{"production", "staging"},
+})
+```
+
+### AppVersion
+
+If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only
+re-open errors if they occur in a later version of the app.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ AppVersion: "1.2.3",
+})
+```
+
+### Hostname
+
+The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The
+default value is obtained from `os.Hostname()` so you won't often need to change this.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ Hostname: "go1",
+})
+```
+
+### ProjectPackages
+
+In order to determine where a crash happens, Bugsnag needs to know which packages you consider to
+be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings
+are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match).
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ ProjectPackages: []string{"main", "github.com/domain/myapp/*"},
+})
+```
+
+### ParamsFilters
+
+Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by
+setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters
+will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like
+`password`, `password_confirmation` and `secret_answer` from being sent.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ ParamsFilters: []string{"password", "secret"},
+})
+```
+
+### Logger
+
+The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ Logger: app.Logger,
+})
+```
+
+### PanicHandler
+
+The first time Bugsnag is configured, it wraps the running program in a panic
+handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This
+forks a sub-process which monitors unhandled panics. To prevent this, set
+`PanicHandler` to `func() {}` the first time you call
+`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about
+unhandled panics.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ PanicHandler: func() {},
+})
+```
+
+### Synchronous
+
+Bugsnag usually starts a new goroutine before sending notifications. This means
+that notifications can be lost if you call `bugsnag.Notify` and then immediately
+call `os.Exit`. To avoid this problem, set Bugsnag to Synchronous (or just
+`panic()` instead).
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ Synchronous: true,
+})
+```
+
+Or just for one error:
+
+```go
+bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true})
+```
+
+### Transport
+
+The transport configures how Bugsnag makes http requests. By default we use
+[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles
+HTTP proxies automatically using the `$HTTP_PROXY` environment variable.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+ Transport: http.DefaultTransport,
+})
+```
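+
+Any `http.RoundTripper` will do here. A minimal sketch of a custom transport,
+assuming you need an explicit proxy and TLS handshake timeout (the proxy
+address is a placeholder, and the snippet additionally needs the `net/url` and
+`time` imports):
+
+```go
+proxyURL, _ := url.Parse("http://proxy.internal:3128") // placeholder address
+bugsnag.Configure(bugsnag.Configuration{
+	Transport: &http.Transport{
+		Proxy:               http.ProxyURL(proxyURL),
+		TLSHandshakeTimeout: 10 * time.Second,
+	},
+})
+```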
+
+## Custom data with OnBeforeNotify
+
+While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`,
+`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and
+inefficient: you're constructing the meta-data whether or not it will actually
+be used. A better idea is to pass raw data in to these functions, and add an
+`OnBeforeNotify` filter that converts them into `MetaData`.
+
+For example, let's say our system processes jobs:
+
+```go
+type Job struct {
+ Retry bool
+ UserId string
+ UserEmail string
+ Name string
+ Params map[string]string
+}
+```
+
+You can pass a job directly into `bugsnag.Notify`:
+
+```go
+bugsnag.Notify(err, job)
+```
+
+And then add a filter to extract information from that job and attach it to the
+Bugsnag event:
+
+```go
+bugsnag.OnBeforeNotify(
+ func(event *bugsnag.Event, config *bugsnag.Configuration) error {
+
+ // Search all the RawData for any *Job pointers that we're passed in
+ // to bugsnag.Notify() and friends.
+ for _, datum := range event.RawData {
+ if job, ok := datum.(*Job); ok {
+ // don't notify bugsnag about errors in retries
+ if job.Retry {
+ return fmt.Errorf("not notifying about retried jobs")
+ }
+
+ // add the job as a tab on Bugsnag.com
+ event.MetaData.AddStruct("Job", job)
+
+ // set the user correctly
+ event.User = &bugsnag.User{Id: job.UserId, Email: job.UserEmail}
+ }
+ }
+
+ // continue notifying as normal
+ return nil
+ })
+```
+
+## Advanced Usage
+
+If you want to have multiple different configurations around in one program,
+you can use `bugsnag.New()` to create multiple independent instances of
+Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in
+mind that until you call `bugsnag.Configure()` unhandled panics will not be
+sent to Bugsnag.
+
+```go
+notifier := bugsnag.New(bugsnag.Configuration{
+ APIKey: "YOUR_OTHER_API_KEY",
+})
+```
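+
+The returned notifier is then used in place of the package-level functions,
+for example:
+
+```go
+notifier.Notify(err)
+```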
+
+In fact, any place that lets you pass in `rawData` also allows you to pass in
+configuration. For example, to send HTTP errors to a different Bugsnag project,
+you could do:
+
+```go
+bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"})
+```
+
+### GroupingHash
+
+If you need to override Bugsnag's grouping algorithm, you can set the
+`GroupingHash` in an `OnBeforeNotify`:
+
+```go
+bugsnag.OnBeforeNotify(
+ func (event *bugsnag.Event, config *bugsnag.Configuration) error {
+ event.GroupingHash = calculateGroupingHash(event)
+ return nil
+ })
+```
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go
new file mode 100644
index 00000000..73aa2d77
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/appengine.go
@@ -0,0 +1,76 @@
+// +build appengine
+
+package bugsnag
+
+import (
+ "appengine"
+ "appengine/urlfetch"
+ "appengine/user"
+ "fmt"
+ "log"
+ "net/http"
+)
+
+func defaultPanicHandler() {}
+
+func init() {
+ OnBeforeNotify(appengineMiddleware)
+}
+
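+// appengineMiddleware adapts notifications for App Engine: it finds an
+// appengine.Context (or derives one from an *http.Request) in the raw data,
+// switches the transport to urlfetch, logs to the request context, and fills
+// in the release stage and current user when unset.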
+func appengineMiddleware(event *Event, config *Configuration) (err error) {
+ var c appengine.Context
+
+ for _, datum := range event.RawData {
+ if r, ok := datum.(*http.Request); ok {
+ c = appengine.NewContext(r)
+ break
+ } else if context, ok := datum.(appengine.Context); ok {
+ c = context
+ break
+ }
+ }
+
+ if c == nil {
+ return fmt.Errorf("No appengine context given")
+ }
+
+ // You can only use the builtin http library if you pay for appengine,
+ // so we use the appengine urlfetch service instead.
+ config.Transport = &urlfetch.Transport{
+ Context: c,
+ }
+
+ // Anything written to stderr/stdout is discarded, so let's log to the request.
+ config.Logger = log.New(appengineWriter{c}, config.Logger.Prefix(), config.Logger.Flags())
+
+ // Set the releaseStage appropriately
+ if config.ReleaseStage == "" {
+ if appengine.IsDevAppServer() {
+ config.ReleaseStage = "development"
+ } else {
+ config.ReleaseStage = "production"
+ }
+ }
+
+ if event.User == nil {
+ u := user.Current(c)
+ if u != nil {
+ event.User = &User{
+ Id: u.ID,
+ Email: u.Email,
+ }
+ }
+ }
+
+ return nil
+}
+
+// Convert an appengine.Context into an io.Writer so we can create a log.Logger.
+type appengineWriter struct {
+ appengine.Context
+}
+
+func (c appengineWriter) Write(b []byte) (int, error) {
+ c.Warningf(string(b))
+ return len(b), nil
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go
new file mode 100644
index 00000000..acd0fed3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag.go
@@ -0,0 +1,131 @@
+package bugsnag
+
+import (
+ "github.com/bugsnag/bugsnag-go/errors"
+ "log"
+ "net/http"
+ "os"
+ "sync"
+
+ // Fixes a bug with SHA-384 intermediate certs on some platforms.
+ // - https://github.com/bugsnag/bugsnag-go/issues/9
+ _ "crypto/sha512"
+)
+
+// The current version of bugsnag-go.
+const VERSION = "1.0.2"
+
+var once sync.Once
+var middleware middlewareStack
+
+// The configuration for the default bugsnag notifier.
+var Config Configuration
+
+var defaultNotifier = Notifier{&Config, nil}
+
+// Configure Bugsnag. The only required setting is the APIKey, which can be
+// obtained by clicking on "Settings" in your Bugsnag dashboard. This function
+// is also responsible for installing the global panic handler, so it should be
+// called as early as possible in your initialization process.
+func Configure(config Configuration) {
+ Config.update(&config)
+ once.Do(Config.PanicHandler)
+}
+
+// Notify sends an error to Bugsnag along with the current stack trace. The
+// rawData is used to send extra information along with the error. For example
+// you can pass the current http.Request to Bugsnag to see information about it
+// in the dashboard, or set the severity of the notification.
+func Notify(err error, rawData ...interface{}) error {
+ return defaultNotifier.Notify(errors.New(err, 1), rawData...)
+}
+
+// AutoNotify logs a panic on a goroutine and then repanics.
+// It should only be used in places that have existing panic handlers further
+// up the stack. See bugsnag.Recover(). The rawData is used to send extra
+// information along with any panics that are handled this way.
+// Usage: defer bugsnag.AutoNotify()
+func AutoNotify(rawData ...interface{}) {
+ if err := recover(); err != nil {
+ rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityError)
+ defaultNotifier.Notify(errors.New(err, 2), rawData...)
+ panic(err)
+ }
+}
+
+// Recover logs a panic on a goroutine and then recovers.
+// The rawData is used to send extra information along with
+// any panics that are handled this way
+// Usage: defer bugsnag.Recover()
+func Recover(rawData ...interface{}) {
+ if err := recover(); err != nil {
+ rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityWarning)
+ defaultNotifier.Notify(errors.New(err, 2), rawData...)
+ }
+}
+
+// OnBeforeNotify adds a callback to be run before a notification is sent to
+// Bugsnag. It can be used to modify the event or its MetaData. Changes made
+// to the configuration are local to notifying about this event. To prevent the
+// event from being sent to Bugsnag return an error, this error will be
+// returned from bugsnag.Notify() and the event will not be sent.
+func OnBeforeNotify(callback func(event *Event, config *Configuration) error) {
+ middleware.OnBeforeNotify(callback)
+}
+
+// Handler creates an http Handler that notifies Bugsnag of any panics that
+// happen. It then repanics so that the default http Server panic handler can
+// handle the panic too. The rawData is used to send extra information along
+// with any panics that are handled this way.
+func Handler(h http.Handler, rawData ...interface{}) http.Handler {
+ notifier := New(rawData...)
+ if h == nil {
+ h = http.DefaultServeMux
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer notifier.AutoNotify(r)
+ h.ServeHTTP(w, r)
+ })
+}
+
+// HandlerFunc creates an http HandlerFunc that notifies Bugsnag about any
+// panics that happen. It then repanics so that the default http Server panic
+// handler can handle the panic too. The rawData is used to send extra
+// information along with any panics that are handled this way. If you have
+// already wrapped your http server using bugsnag.Handler() you don't also need
+// to wrap each HandlerFunc.
+func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc {
+ notifier := New(rawData...)
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ defer notifier.AutoNotify(r)
+ h(w, r)
+ }
+}
+
+func init() {
+ // Set up builtin middleware
+ OnBeforeNotify(httpRequestMiddleware)
+
+ // Default configuration
+ Config.update(&Configuration{
+ APIKey: "",
+ Endpoint: "https://notify.bugsnag.com/",
+ Hostname: "",
+ AppVersion: "",
+ ReleaseStage: "",
+ ParamsFilters: []string{"password", "secret"},
+ // * for app-engine
+ ProjectPackages: []string{"main*"},
+ NotifyReleaseStages: nil,
+ Logger: log.New(os.Stdout, log.Prefix(), log.Flags()),
+ PanicHandler: defaultPanicHandler,
+ Transport: http.DefaultTransport,
+ })
+
+ hostname, err := os.Hostname()
+ if err == nil {
+ Config.Hostname = hostname
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go
new file mode 100644
index 00000000..9f6a52ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/bugsnag_test.go
@@ -0,0 +1,461 @@
+package bugsnag
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/bitly/go-simplejson"
+)
+
+func TestConfigure(t *testing.T) {
+ Configure(Configuration{
+ APIKey: testAPIKey,
+ })
+
+ if Config.APIKey != testAPIKey {
+ t.Errorf("Setting APIKey didn't work")
+ }
+
+ if New().Config.APIKey != testAPIKey {
+ t.Errorf("Setting APIKey didn't work for new notifiers")
+ }
+}
+
+var postedJSON = make(chan []byte, 10)
+var testOnce sync.Once
+var testEndpoint string
+var testAPIKey = "166f5ad3590596f9aa8d601ea89af845"
+
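+// startTestServer starts a local HTTP server, standing in for the Bugsnag
+// API, that forwards every posted request body to the postedJSON channel.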
+func startTestServer() {
+ testOnce.Do(func() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ panic(err)
+ }
+ postedJSON <- body
+ })
+
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ panic(err)
+ }
+ testEndpoint = "http://" + l.Addr().String() + "/"
+
+ go http.Serve(l, mux)
+ })
+}
+
+type _recurse struct {
+ *_recurse
+}
+
+func TestNotify(t *testing.T) {
+ startTestServer()
+
+ recurse := _recurse{}
+ recurse._recurse = &recurse
+
+ OnBeforeNotify(func(event *Event, config *Configuration) error {
+ if event.Context == "testing" {
+ event.GroupingHash = "lol"
+ }
+ return nil
+ })
+
+ Notify(fmt.Errorf("hello world"),
+ Configuration{
+ APIKey: testAPIKey,
+ Endpoint: testEndpoint,
+ ReleaseStage: "test",
+ AppVersion: "1.2.3",
+ Hostname: "web1",
+ ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"},
+ },
+ User{Id: "123", Name: "Conrad", Email: "me@cirw.in"},
+ Context{"testing"},
+ MetaData{"test": {
+ "password": "sneaky",
+ "value": "able",
+ "broken": complex(1, 2),
+ "recurse": recurse,
+ }},
+ )
+
+ json, err := simplejson.NewJson(<-postedJSON)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if json.Get("apiKey").MustString() != testAPIKey {
+ t.Errorf("Wrong api key in payload")
+ }
+
+ if json.GetPath("notifier", "name").MustString() != "Bugsnag Go" {
+ t.Errorf("Wrong notifier name in payload")
+ }
+
+ event := json.Get("events").GetIndex(0)
+
+ for k, value := range map[string]string{
+ "payloadVersion": "2",
+ "severity": "warning",
+ "context": "testing",
+ "groupingHash": "lol",
+ "app.releaseStage": "test",
+ "app.version": "1.2.3",
+ "device.hostname": "web1",
+ "user.id": "123",
+ "user.name": "Conrad",
+ "user.email": "me@cirw.in",
+ "metaData.test.password": "[REDACTED]",
+ "metaData.test.value": "able",
+ "metaData.test.broken": "[complex128]",
+ "metaData.test.recurse._recurse": "[RECURSION]",
+ } {
+ key := strings.Split(k, ".")
+ if event.GetPath(key...).MustString() != value {
+ t.Errorf("Wrong %v: %v != %v", key, event.GetPath(key...).MustString(), value)
+ }
+ }
+
+ exception := event.Get("exceptions").GetIndex(0)
+
+ if exception.Get("message").MustString() != "hello world" {
+ t.Errorf("Wrong message in payload")
+ }
+
+ if exception.Get("errorClass").MustString() != "*errors.errorString" {
+ t.Errorf("Wrong errorClass in payload: %v", exception.Get("errorClass").MustString())
+ }
+
+ frame0 := exception.Get("stacktrace").GetIndex(0)
+ if frame0.Get("file").MustString() != "bugsnag_test.go" ||
+ frame0.Get("method").MustString() != "TestNotify" ||
+ frame0.Get("inProject").MustBool() != true ||
+ frame0.Get("lineNumber").MustInt() == 0 {
+ t.Errorf("Wrong frame0")
+ }
+
+ frame1 := exception.Get("stacktrace").GetIndex(1)
+
+ if frame1.Get("file").MustString() != "testing/testing.go" ||
+ frame1.Get("method").MustString() != "tRunner" ||
+ frame1.Get("inProject").MustBool() != false ||
+ frame1.Get("lineNumber").MustInt() == 0 {
+ t.Errorf("Wrong frame1")
+ }
+}
+
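+// crashyHandler panics with "send on closed channel" so the tests can
+// exercise the panic-reporting path.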
+func crashyHandler(w http.ResponseWriter, r *http.Request) {
+ c := make(chan int)
+ close(c)
+ c <- 1
+}
+
+func runCrashyServer(rawData ...interface{}) (net.Listener, error) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", crashyHandler)
+ srv := http.Server{
+ Addr: l.Addr().String(),
+ Handler: Handler(mux, rawData...),
+ ErrorLog: log.New(ioutil.Discard, log.Prefix(), 0),
+ }
+
+ go srv.Serve(l)
+ return l, err
+}
+
+func TestHandler(t *testing.T) {
+ startTestServer()
+
+ l, err := runCrashyServer(Configuration{
+ APIKey: testAPIKey,
+ Endpoint: testEndpoint,
+ ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"},
+ Logger: log.New(ioutil.Discard, log.Prefix(), log.Flags()),
+ }, SeverityInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ http.Get("http://" + l.Addr().String() + "/ok?foo=bar")
+ l.Close()
+
+ json, err := simplejson.NewJson(<-postedJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if json.Get("apiKey").MustString() != testAPIKey {
+ t.Errorf("Wrong api key in payload")
+ }
+
+ if json.GetPath("notifier", "name").MustString() != "Bugsnag Go" {
+ t.Errorf("Wrong notifier name in payload")
+ }
+
+ event := json.Get("events").GetIndex(0)
+
+ for k, value := range map[string]string{
+ "payloadVersion": "2",
+ "severity": "info",
+ "user.id": "127.0.0.1",
+ "metaData.Request.Url": "http://" + l.Addr().String() + "/ok?foo=bar",
+ "metaData.Request.Method": "GET",
+ } {
+ key := strings.Split(k, ".")
+ if event.GetPath(key...).MustString() != value {
+ t.Errorf("Wrong %v: %v != %v", key, event.GetPath(key...).MustString(), value)
+ }
+ }
+
+ if event.GetPath("metaData", "Request", "Params", "foo").GetIndex(0).MustString() != "bar" {
+ t.Errorf("missing GET params in request metadata")
+ }
+
+ if event.GetPath("metaData", "Headers", "Accept-Encoding").GetIndex(0).MustString() != "gzip" {
+ t.Errorf("missing GET params in request metadata: %v", event.GetPath("metaData", "Headers"))
+ }
+
+ exception := event.Get("exceptions").GetIndex(0)
+
+ if exception.Get("message").MustString() != "runtime error: send on closed channel" {
+ t.Errorf("Wrong message in payload: %v", exception.Get("message").MustString())
+ }
+
+ if exception.Get("errorClass").MustString() != "runtime.errorCString" {
+ t.Errorf("Wrong errorClass in payload: %v", exception.Get("errorClass").MustString())
+ }
+
+ // TODO:CI these are probably dependent on go version.
+ frame0 := exception.Get("stacktrace").GetIndex(0)
+ if frame0.Get("file").MustString() != "runtime/panic.c" ||
+ frame0.Get("method").MustString() != "panicstring" ||
+ frame0.Get("inProject").MustBool() != false ||
+ frame0.Get("lineNumber").MustInt() == 0 {
+ t.Errorf("Wrong frame0: %v", frame0)
+ }
+
+ frame3 := exception.Get("stacktrace").GetIndex(3)
+
+ if frame3.Get("file").MustString() != "bugsnag_test.go" ||
+ frame3.Get("method").MustString() != "crashyHandler" ||
+ frame3.Get("inProject").MustBool() != true ||
+ frame3.Get("lineNumber").MustInt() == 0 {
+ t.Errorf("Wrong frame3: %v", frame3)
+ }
+}
+
+func TestAutoNotify(t *testing.T) {
+
+ var panicked interface{}
+
+ func() {
+ defer func() {
+ panicked = recover()
+ }()
+ defer AutoNotify(Configuration{Endpoint: testEndpoint, APIKey: testAPIKey})
+
+ panic("eggs")
+ }()
+
+ if panicked.(string) != "eggs" {
+ t.Errorf("didn't re-panic")
+ }
+
+ json, err := simplejson.NewJson(<-postedJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ event := json.Get("events").GetIndex(0)
+
+ if event.Get("severity").MustString() != "error" {
+ t.Errorf("severity should be error")
+ }
+ exception := event.Get("exceptions").GetIndex(0)
+
+ if exception.Get("message").MustString() != "eggs" {
+ t.Errorf("caught wrong panic")
+ }
+}
+
+func TestRecover(t *testing.T) {
+ var panicked interface{}
+
+ func() {
+ defer func() {
+ panicked = recover()
+ }()
+ defer Recover(Configuration{Endpoint: testEndpoint, APIKey: testAPIKey})
+
+ panic("ham")
+ }()
+
+ if panicked != nil {
+ t.Errorf("re-panick'd")
+ }
+
+ json, err := simplejson.NewJson(<-postedJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ event := json.Get("events").GetIndex(0)
+
+ if event.Get("severity").MustString() != "warning" {
+ t.Errorf("severity should be warning")
+ }
+ exception := event.Get("exceptions").GetIndex(0)
+
+ if exception.Get("message").MustString() != "ham" {
+ t.Errorf("caught wrong panic")
+ }
+}
+
+func handleGet(w http.ResponseWriter, r *http.Request) {
+
+}
+
+var createAccount = handleGet
+
+type _job struct {
+ Name string
+ Process func()
+}
+
+func ExampleAutoNotify() interface{} {
+ return func(w http.ResponseWriter, request *http.Request) {
+ defer AutoNotify(request, Context{"createAccount"})
+
+ createAccount(w, request)
+ }
+}
+
+func ExampleRecover(job _job) {
+ go func() {
+ defer Recover(Context{job.Name}, SeverityWarning)
+
+ job.Process()
+ }()
+}
+
+func ExampleConfigure() {
+ Configure(Configuration{
+ APIKey: "YOUR_API_KEY_HERE",
+
+ ReleaseStage: "production",
+
+ // See Configuration{} for other fields
+ })
+}
+
+func ExampleHandler() {
+ // Set up your http handlers as usual
+ http.HandleFunc("/", handleGet)
+
+ // use bugsnag.Handler(nil) to wrap the default http handlers
+ // so that Bugsnag is automatically notified about panics.
+ http.ListenAndServe(":1234", Handler(nil))
+}
+
+func ExampleHandler_customServer() {
+ // If you're using a custom server, set the handlers explicitly.
+ http.HandleFunc("/", handleGet)
+
+ srv := http.Server{
+ Addr: ":1234",
+ ReadTimeout: 10 * time.Second,
+ // use bugsnag.Handler(nil) to wrap the default http handlers
+ // so that Bugsnag is automatically notified about panics.
+ Handler: Handler(nil),
+ }
+ srv.ListenAndServe()
+}
+
+func ExampleHandler_customHandlers() {
+ // If you're using custom handlers, wrap the handlers explicitly.
+ handler := http.NewServeMux()
+ http.HandleFunc("/", handleGet)
+ // use bugsnag.Handler(handler) to wrap the handlers so that Bugsnag is
+ // automatically notified about panics
+ http.ListenAndServe(":1234", Handler(handler))
+}
+
+func ExampleNotify() {
+ _, err := net.Listen("tcp", ":80")
+
+ if err != nil {
+ Notify(err)
+ }
+}
+
+func ExampleNotify_details(userID string) {
+ _, err := net.Listen("tcp", ":80")
+
+ if err != nil {
+ Notify(err,
+ // show as low-severity
+ SeverityInfo,
+ // set the context
+ Context{"createlistener"},
+ // pass the user id in to count users affected.
+ User{Id: userID},
+ // custom meta-data tab
+ MetaData{
+ "Listen": {
+ "Protocol": "tcp",
+ "Port": "80",
+ },
+ },
+ )
+ }
+
+}
+
+type Job struct {
+ Retry bool
+ UserId string
+ UserEmail string
+ Name string
+ Params map[string]string
+}
+
+func ExampleOnBeforeNotify() {
+ OnBeforeNotify(func(event *Event, config *Configuration) error {
+
+ // Search all the RawData for any *Job pointers that we're passed in
+ // to bugsnag.Notify() and friends.
+ for _, datum := range event.RawData {
+ if job, ok := datum.(*Job); ok {
+ // don't notify bugsnag about errors in retries
+ if job.Retry {
+ return fmt.Errorf("bugsnag middleware: not notifying about job retry")
+ }
+
+ // add the job as a tab on Bugsnag.com
+ event.MetaData.AddStruct("Job", job)
+
+ // set the user correctly
+ event.User = &User{Id: job.UserId, Email: job.UserEmail}
+ }
+ }
+
+ // continue notifying as normal
+ return nil
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go
new file mode 100644
index 00000000..7ff26e56
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration.go
@@ -0,0 +1,159 @@
+package bugsnag
+
+import (
+ "log"
+ "net/http"
+ "path/filepath"
+ "strings"
+)
+
+// Configuration sets up and customizes communication with the Bugsnag API.
+type Configuration struct {
+ // Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can
+ // find this by clicking Settings on https://bugsnag.com/.
+ APIKey string
+ // The Endpoint to notify about crashes. This defaults to
+ // "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then
+ // set it to your internal Bugsnag endpoint.
+ Endpoint string
+
+ // The current release stage. This defaults to "production" and is used to
+ // filter errors in the Bugsnag dashboard.
+ ReleaseStage string
+ // The currently running version of the app. This is used to filter errors
+ // in the Bugsnag dashboard. If you set this then Bugsnag will only re-open
+ // resolved errors if they happen in different app versions.
+ AppVersion string
+ // The hostname of the current server. This defaults to the return value of
+ // os.Hostname() and is graphed in the Bugsnag dashboard.
+ Hostname string
+
+ // The Release stages to notify in. If you set this then bugsnag-go will
+ // only send notifications to Bugsnag if the ReleaseStage is listed here.
+ NotifyReleaseStages []string
+
+ // packages that are part of your app. Bugsnag uses this to determine how
+ // to group errors and how to display them on your dashboard. You should
+ // include any packages that are part of your app, and exclude libraries
+ // and helpers. You can list wildcards here, and they'll be expanded using
+ // filepath.Glob. The default value is []string{"main*"}
+ ProjectPackages []string
+
+ // Any meta-data that matches these filters will be marked as [REDACTED]
+ // before sending a Notification to Bugsnag. It defaults to
+ // []string{"password", "secret"} so that request parameters like password,
+ // password_confirmation and auth_secret will not be sent to Bugsnag.
+ ParamsFilters []string
+
+ // The PanicHandler is used by Bugsnag to catch unhandled panics in your
+ // application. The default panicHandler uses mitchellh's panicwrap library,
+ // and you can disable this feature by passing an empty: func() {}
+ PanicHandler func()
+
+ // The logger that Bugsnag should log to. Uses the same defaults as go's
+ // builtin logging package. bugsnag-go logs whenever it notifies Bugsnag
+ // of an error, and when any error occurs inside the library itself.
+ Logger *log.Logger
+ // The http Transport to use, defaults to the default http Transport. This
+ // can be configured if you are in an environment like Google App Engine
+ // that has stringent conditions on making http requests.
+ Transport http.RoundTripper
+ // Whether bugsnag should notify synchronously. This defaults to false which
+ // causes bugsnag-go to spawn a new goroutine for each notification.
+ Synchronous bool
+ // TODO: remember to update the update() function when modifying this struct
+}
+
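+// update overwrites config's fields in place with any non-zero fields set on
+// other, returning config for convenience.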
+func (config *Configuration) update(other *Configuration) *Configuration {
+ if other.APIKey != "" {
+ config.APIKey = other.APIKey
+ }
+ if other.Endpoint != "" {
+ config.Endpoint = other.Endpoint
+ }
+ if other.Hostname != "" {
+ config.Hostname = other.Hostname
+ }
+ if other.AppVersion != "" {
+ config.AppVersion = other.AppVersion
+ }
+ if other.ReleaseStage != "" {
+ config.ReleaseStage = other.ReleaseStage
+ }
+ if other.ParamsFilters != nil {
+ config.ParamsFilters = other.ParamsFilters
+ }
+ if other.ProjectPackages != nil {
+ config.ProjectPackages = other.ProjectPackages
+ }
+ if other.Logger != nil {
+ config.Logger = other.Logger
+ }
+ if other.NotifyReleaseStages != nil {
+ config.NotifyReleaseStages = other.NotifyReleaseStages
+ }
+ if other.PanicHandler != nil {
+ config.PanicHandler = other.PanicHandler
+ }
+ if other.Transport != nil {
+ config.Transport = other.Transport
+ }
+ if other.Synchronous {
+ config.Synchronous = true
+ }
+
+ return config
+}
+
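+// merge returns a copy of config with other's non-zero fields applied,
+// leaving the receiver unchanged.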
+func (config *Configuration) merge(other *Configuration) *Configuration {
+ return config.clone().update(other)
+}
+
+func (config *Configuration) clone() *Configuration {
+ clone := *config
+ return &clone
+}
+
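+// isProjectPackage reports whether pkg matches one of the configured
+// ProjectPackages patterns (compared with filepath.Match).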
+func (config *Configuration) isProjectPackage(pkg string) bool {
+ for _, p := range config.ProjectPackages {
+ if match, _ := filepath.Match(p, pkg); match {
+ return true
+ }
+ }
+ return false
+}
+
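+// stripProjectPackages trims the first matching ProjectPackages prefix from
+// file, so stacktrace paths are shown relative to the project.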
+func (config *Configuration) stripProjectPackages(file string) string {
+ for _, p := range config.ProjectPackages {
+ if len(p) > 2 && p[len(p)-2] == '/' && p[len(p)-1] == '*' {
+ p = p[:len(p)-1]
+ } else {
+ p = p + "/"
+ }
+ if strings.HasPrefix(file, p) {
+ return strings.TrimPrefix(file, p)
+ }
+ }
+
+ return file
+}
+
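+// log writes a message via the configured Logger, falling back to the
+// standard library's default logger when none is set.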
+func (config *Configuration) log(fmt string, args ...interface{}) {
+ if config != nil && config.Logger != nil {
+ config.Logger.Printf(fmt, args...)
+ } else {
+ log.Printf(fmt, args...)
+ }
+}
+
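+// notifyInReleaseStage reports whether notifications should be sent in the
+// current ReleaseStage; a nil NotifyReleaseStages list means notify in every
+// stage.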
+func (config *Configuration) notifyInReleaseStage() bool {
+ if config.NotifyReleaseStages == nil {
+ return true
+ }
+ for _, r := range config.NotifyReleaseStages {
+ if r == config.ReleaseStage {
+ return true
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go
new file mode 100644
index 00000000..2bd34889
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/configuration_test.go
@@ -0,0 +1,58 @@
+package bugsnag
+
+import (
+ "testing"
+)
+
+func TestNotifyReleaseStages(t *testing.T) {
+
+ var testCases = []struct {
+ stage string
+ configured []string
+ notify bool
+ msg string
+ }{
+ {
+ stage: "production",
+ notify: true,
+ msg: "Should notify in all release stages by default",
+ },
+ {
+ stage: "production",
+ configured: []string{"development", "production"},
+ notify: true,
+ msg: "Failed to notify in configured release stage",
+ },
+ {
+ stage: "staging",
+ configured: []string{"development", "production"},
+ notify: false,
+ msg: "Failed to prevent notification in excluded release stage",
+ },
+ }
+
+ for _, testCase := range testCases {
+ Configure(Configuration{ReleaseStage: testCase.stage, NotifyReleaseStages: testCase.configured})
+
+ if Config.notifyInReleaseStage() != testCase.notify {
+ t.Error(testCase.msg)
+ }
+ }
+}
+
+func TestProjectPackages(t *testing.T) {
+ Configure(Configuration{ProjectPackages: []string{"main", "github.com/ConradIrwin/*"}})
+ if !Config.isProjectPackage("main") {
+ t.Error("literal project package doesn't work")
+ }
+ if !Config.isProjectPackage("github.com/ConradIrwin/foo") {
+ t.Error("wildcard project package doesn't work")
+ }
+ if Config.isProjectPackage("runtime") {
+ t.Error("wrong packges being marked in project")
+ }
+ if Config.isProjectPackage("github.com/ConradIrwin/foo/bar") {
+ t.Error("wrong packges being marked in project")
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go
new file mode 100644
index 00000000..827e03b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/doc.go
@@ -0,0 +1,69 @@
+/*
+Package bugsnag captures errors in real-time and reports them to Bugsnag (http://bugsnag.com).
+
+Using bugsnag-go is a three-step process.
+
+1. As early as possible in your program configure the notifier with your APIKey. This sets up
+handling of panics that would otherwise crash your app.
+
+ func init() {
+ bugsnag.Configure(bugsnag.Configuration{
+ APIKey: "YOUR_API_KEY_HERE",
+ })
+ }
+
+2. Add bugsnag to places that already catch panics. For example you should add it to the HTTP server
+when you call ListenAndServe:
+
+ http.ListenAndServe(":8080", bugsnag.Handler(nil))
+
+If that's not possible, for example because you're using Google App Engine, you can also wrap each
+HTTP handler manually:
+
+ http.HandleFunc("/" bugsnag.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+ ...
+ })
+
+3. To notify Bugsnag of an error that is not a panic, pass it to bugsnag.Notify. This will also
+log the error message using the configured Logger.
+
+ if err != nil {
+ bugsnag.Notify(err)
+ }
+
+For detailed integration instructions see https://bugsnag.com/docs/notifiers/go.
+
+Configuration
+
+The only required configuration is the Bugsnag API key which can be obtained by clicking "Settings"
+on the top of https://bugsnag.com/ after signing up. We also recommend you set the ReleaseStage
+and AppVersion if these make sense for your deployment workflow.
+
+RawData
+
+If you need to attach extra data to Bugsnag notifications you can do that using
+the rawData mechanism. Most of the functions that send errors to Bugsnag allow
+you to pass in any number of interface{} values as rawData. The rawData can
+consist of the Severity, Context, User or MetaData types listed below, and
+there is also builtin support for *http.Requests.
+
+ bugsnag.Notify(err, bugsnag.SeverityError)
+
+If you want to add custom tabs to your bugsnag dashboard you can pass any value in as rawData,
+and then process it into the event's metadata using a bugsnag.OnBeforeNotify() hook.
+
+ bugsnag.Notify(err, account)
+
+	bugsnag.OnBeforeNotify(func(e *bugsnag.Event, c *bugsnag.Configuration) error {
+		for _, datum := range e.RawData {
+			if account, ok := datum.(Account); ok {
+				e.MetaData.Add("account", "name", account.Name)
+				e.MetaData.Add("account", "url", account.URL)
+			}
+		}
+		return nil
+	})
+
+If necessary you can pass Configuration in as rawData, or modify the Configuration object passed
+into OnBeforeNotify hooks. Configuration passed in this way only affects the current notification.
+*/
+package bugsnag
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md
new file mode 100644
index 00000000..8d8e097a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/README.md
@@ -0,0 +1,6 @@
+Adds stacktraces to errors in golang.
+
+This was made to help build the Bugsnag notifier but can be used standalone if
+you'd like to have stacktraces on errors.
+
+See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs.
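+
+A minimal sketch of standalone use (the `step` helper and its failure are
+purely illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/bugsnag/bugsnag-go/errors"
+)
+
+func step() error { return fmt.Errorf("boom") } // illustrative failure
+
+func main() {
+	if err := step(); err != nil {
+		// Wrap err with a stacktrace captured here; skip=0 starts the
+		// trace from the current call.
+		wrapped := errors.New(err, 0)
+		fmt.Printf("%s\n%s", wrapped.Error(), wrapped.Stack())
+	}
+}
+```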
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go
new file mode 100644
index 00000000..0081c0a8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error.go
@@ -0,0 +1,90 @@
+// Package errors provides errors that have stack-traces.
+package errors
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "runtime"
+)
+
+// The maximum number of stackframes on any error.
+var MaxStackDepth = 50
+
+// Error is an error with an attached stacktrace. It can be used
+// wherever the builtin error interface is expected.
+type Error struct {
+ Err error
+ stack []uintptr
+ frames []StackFrame
+}
+
+// New makes an Error from the given value. If that value is already an
+// error then it will be used directly, if not, it will be passed to
+// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
+// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
+func New(e interface{}, skip int) *Error {
+ var err error
+
+ switch e := e.(type) {
+ case *Error:
+ return e
+ case error:
+ err = e
+ default:
+ err = fmt.Errorf("%v", e)
+ }
+
+ stack := make([]uintptr, MaxStackDepth)
+ length := runtime.Callers(2+skip, stack[:])
+ return &Error{
+ Err: err,
+ stack: stack[:length],
+ }
+}
+
+// Errorf creates a new error with the given message. You can use it
+// as a drop-in replacement for fmt.Errorf() to provide descriptive
+// errors in return values.
+func Errorf(format string, a ...interface{}) *Error {
+ return New(fmt.Errorf(format, a...), 1)
+}
+
+// Error returns the underlying error's message.
+func (err *Error) Error() string {
+ return err.Err.Error()
+}
+
+// Stack returns the callstack formatted the same way that go does
+// in runtime/debug.Stack()
+func (err *Error) Stack() []byte {
+ buf := bytes.Buffer{}
+
+ for _, frame := range err.StackFrames() {
+ buf.WriteString(frame.String())
+ }
+
+ return buf.Bytes()
+}
+
+// StackFrames returns an array of frames containing information about the
+// stack.
+func (err *Error) StackFrames() []StackFrame {
+ if err.frames == nil {
+ err.frames = make([]StackFrame, len(err.stack))
+
+ for i, pc := range err.stack {
+ err.frames[i] = NewStackFrame(pc)
+ }
+ }
+
+ return err.frames
+}
+
+// TypeName returns the type of this error, e.g. *errors.errorString.
+func (err *Error) TypeName() string {
+ if _, ok := err.Err.(uncaughtPanic); ok {
+ return "panic"
+ }
+ return reflect.TypeOf(err.Err).String()
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go
new file mode 100644
index 00000000..95232ea2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/error_test.go
@@ -0,0 +1,117 @@
+package errors
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "runtime/debug"
+ "testing"
+)
+
+func TestStackFormatMatches(t *testing.T) {
+
+ defer func() {
+ err := recover()
+ if err != 'a' {
+ t.Fatal(err)
+ }
+
+ bs := [][]byte{Errorf("hi").Stack(), debug.Stack()}
+
+ // Ignore the first line (as it contains the PC of the .Stack() call)
+ bs[0] = bytes.SplitN(bs[0], []byte("\n"), 2)[1]
+ bs[1] = bytes.SplitN(bs[1], []byte("\n"), 2)[1]
+
+ if bytes.Compare(bs[0], bs[1]) != 0 {
+ t.Errorf("Stack didn't match")
+ t.Errorf("%s", bs[0])
+ t.Errorf("%s", bs[1])
+ }
+ }()
+
+ a()
+}
+
+func TestSkipWorks(t *testing.T) {
+
+ defer func() {
+ err := recover()
+ if err != 'a' {
+ t.Fatal(err)
+ }
+
+ bs := [][]byte{New("hi", 2).Stack(), debug.Stack()}
+
+ // should skip four lines of debug.Stack()
+ bs[1] = bytes.SplitN(bs[1], []byte("\n"), 5)[4]
+
+ if bytes.Compare(bs[0], bs[1]) != 0 {
+ t.Errorf("Stack didn't match")
+ t.Errorf("%s", bs[0])
+ t.Errorf("%s", bs[1])
+ }
+ }()
+
+ a()
+}
+
+func TestNewError(t *testing.T) {
+
+ e := func() error {
+ return New("hi", 1)
+ }()
+
+ if e.Error() != "hi" {
+ t.Errorf("Constructor with a string failed")
+ }
+
+ if New(fmt.Errorf("yo"), 0).Error() != "yo" {
+ t.Errorf("Constructor with an error failed")
+ }
+
+ if New(e, 0) != e {
+ t.Errorf("Constructor with an Error failed")
+ }
+
+ if New(nil, 0).Error() != "" {
+ t.Errorf("Constructor with nil failed")
+ }
+}
+
+func ExampleErrorf(x int) (int, error) {
+ if x%2 == 1 {
+ return 0, Errorf("can only halve even numbers, got %d", x)
+ }
+ return x / 2, nil
+}
+
+func ExampleNewError() (error, error) {
+ // Wrap io.EOF with the current stack-trace and return it
+ return nil, New(io.EOF, 0)
+}
+
+func ExampleNewError_skip() {
+ defer func() {
+ if err := recover(); err != nil {
+ // skip 1 frame (the deferred function) and then return the wrapped err
+ err = New(err, 1)
+ }
+ }()
+}
+
+func ExampleError_Stack(err Error) {
+ fmt.Printf("Error: %s\n%s", err.Error(), err.Stack())
+}
+
+func a() error {
+ b(5)
+ return nil
+}
+
+func b(i int) {
+ c()
+}
+
+func c() {
+ panic('a')
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go
new file mode 100644
index 00000000..cc37052d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic.go
@@ -0,0 +1,127 @@
+package errors
+
+import (
+ "strconv"
+ "strings"
+)
+
+type uncaughtPanic struct{ message string }
+
+func (p uncaughtPanic) Error() string {
+ return p.message
+}
+
+// ParsePanic allows you to get an error object from the output of a go program
+// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap.
+func ParsePanic(text string) (*Error, error) {
+ lines := strings.Split(text, "\n")
+
+ state := "start"
+
+ var message string
+ var stack []StackFrame
+
+ for i := 0; i < len(lines); i++ {
+ line := lines[i]
+
+ if state == "start" {
+ if strings.HasPrefix(line, "panic: ") {
+ message = strings.TrimPrefix(line, "panic: ")
+ state = "seek"
+ } else {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line)
+ }
+
+ } else if state == "seek" {
+ if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") {
+ state = "parsing"
+ }
+
+ } else if state == "parsing" {
+ if line == "" {
+ state = "done"
+ break
+ }
+ createdBy := false
+ if strings.HasPrefix(line, "created by ") {
+ line = strings.TrimPrefix(line, "created by ")
+ createdBy = true
+ }
+
+ i++
+
+ if i >= len(lines) {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line)
+ }
+
+ frame, err := parsePanicFrame(line, lines[i], createdBy)
+ if err != nil {
+ return nil, err
+ }
+
+ stack = append(stack, *frame)
+ if createdBy {
+ state = "done"
+ break
+ }
+ }
+ }
+
+ if state == "done" || state == "parsing" {
+ return &Error{Err: uncaughtPanic{message}, frames: stack}, nil
+ }
+ return nil, Errorf("could not parse panic: %v", text)
+}
+
+// The lines we're passing look like this:
+//
+// main.(*foo).destruct(0xc208067e98)
+// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
+func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
+ idx := strings.LastIndex(name, "(")
+ if idx == -1 && !createdBy {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name)
+ }
+ if idx != -1 {
+ name = name[:idx]
+ }
+ pkg := ""
+
+ if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
+ pkg += name[:lastslash] + "/"
+ name = name[lastslash+1:]
+ }
+ if period := strings.Index(name, "."); period >= 0 {
+ pkg += name[:period]
+ name = name[period+1:]
+ }
+
+ name = strings.Replace(name, "·", ".", -1)
+
+ if !strings.HasPrefix(line, "\t") {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line)
+ }
+
+ idx = strings.LastIndex(line, ":")
+ if idx == -1 {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line)
+ }
+ file := line[1:idx]
+
+ number := line[idx+1:]
+ if idx = strings.Index(number, " +"); idx > -1 {
+ number = number[:idx]
+ }
+
+ lno, err := strconv.ParseInt(number, 10, 32)
+ if err != nil {
+ return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line)
+ }
+
+ return &StackFrame{
+ File: file,
+ LineNumber: int(lno),
+ Package: pkg,
+ Name: name,
+ }, nil
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go
new file mode 100644
index 00000000..f9ed7845
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/parse_panic_test.go
@@ -0,0 +1,142 @@
+package errors
+
+import (
+ "reflect"
+ "testing"
+)
+
+var createdBy = `panic: hello!
+
+goroutine 54 [running]:
+runtime.panic(0x35ce40, 0xc208039db0)
+ /0/c/go/src/pkg/runtime/panic.c:279 +0xf5
+github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74
+net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0)
+ /0/c/go/src/pkg/net/http/server.go:1698 +0x91
+created by github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.App.Index
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:14 +0x3e
+
+goroutine 16 [IO wait]:
+net.runtime_pollWait(0x911c30, 0x72, 0x0)
+ /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66
+net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46
+net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42
+net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23)
+ /0/c/go/src/pkg/net/fd_unix.go:409 +0x343
+net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d
+net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b
+github.com/revel/revel.Run(0xe6d9)
+ /0/go/src/github.com/revel/revel/server.go:113 +0x926
+main.main()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a
+`
+
+var normalSplit = `panic: hello!
+
+goroutine 54 [running]:
+runtime.panic(0x35ce40, 0xc208039db0)
+ /0/c/go/src/pkg/runtime/panic.c:279 +0xf5
+github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74
+net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0)
+ /0/c/go/src/pkg/net/http/server.go:1698 +0x91
+
+goroutine 16 [IO wait]:
+net.runtime_pollWait(0x911c30, 0x72, 0x0)
+ /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66
+net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46
+net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42
+net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23)
+ /0/c/go/src/pkg/net/fd_unix.go:409 +0x343
+net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d
+net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b
+github.com/revel/revel.Run(0xe6d9)
+ /0/go/src/github.com/revel/revel/server.go:113 +0x926
+main.main()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a
+`
+
+var lastGoroutine = `panic: hello!
+
+goroutine 16 [IO wait]:
+net.runtime_pollWait(0x911c30, 0x72, 0x0)
+ /0/c/go/src/pkg/runtime/netpoll.goc:146 +0x66
+net.(*pollDesc).Wait(0xc2080ba990, 0x72, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:84 +0x46
+net.(*pollDesc).WaitRead(0xc2080ba990, 0x0, 0x0)
+ /0/c/go/src/pkg/net/fd_poll_runtime.go:89 +0x42
+net.(*netFD).accept(0xc2080ba930, 0x58be30, 0x0, 0x9103f0, 0x23)
+ /0/c/go/src/pkg/net/fd_unix.go:409 +0x343
+net.(*TCPListener).AcceptTCP(0xc20803e168, 0x8, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:234 +0x5d
+net.(*TCPListener).Accept(0xc20803e168, 0x0, 0x0, 0x0, 0x0)
+ /0/c/go/src/pkg/net/tcpsock_posix.go:244 +0x4b
+github.com/revel/revel.Run(0xe6d9)
+ /0/go/src/github.com/revel/revel/server.go:113 +0x926
+main.main()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/tmp/main.go:109 +0xe1a
+
+goroutine 54 [running]:
+runtime.panic(0x35ce40, 0xc208039db0)
+ /0/c/go/src/pkg/runtime/panic.c:279 +0xf5
+github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers.func·001()
+ /0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go:13 +0x74
+net/http.(*Server).Serve(0xc20806c780, 0x910c88, 0xc20803e168, 0x0, 0x0)
+ /0/c/go/src/pkg/net/http/server.go:1698 +0x91
+`
+
+var result = []StackFrame{
+ StackFrame{File: "/0/c/go/src/pkg/runtime/panic.c", LineNumber: 279, Name: "panic", Package: "runtime"},
+ StackFrame{File: "/0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go", LineNumber: 13, Name: "func.001", Package: "github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers"},
+ StackFrame{File: "/0/c/go/src/pkg/net/http/server.go", LineNumber: 1698, Name: "(*Server).Serve", Package: "net/http"},
+}
+
+var resultCreatedBy = append(result,
+ StackFrame{File: "/0/go/src/github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers/app.go", LineNumber: 14, Name: "App.Index", Package: "github.com/loopj/bugsnag-example-apps/go/revelapp/app/controllers", ProgramCounter: 0x0})
+
+func TestParsePanic(t *testing.T) {
+
+ todo := map[string]string{
+ "createdBy": createdBy,
+ "normalSplit": normalSplit,
+ "lastGoroutine": lastGoroutine,
+ }
+
+ for key, val := range todo {
+ Err, err := ParsePanic(val)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if Err.TypeName() != "panic" {
+ t.Errorf("Wrong type: %s", Err.TypeName())
+ }
+
+ if Err.Error() != "hello!" {
+ t.Errorf("Wrong message: %s", Err.TypeName())
+ }
+
+ if Err.StackFrames()[0].Func() != nil {
+ t.Errorf("Somehow managed to find a func...")
+ }
+
+ result := result
+ if key == "createdBy" {
+ result = resultCreatedBy
+ }
+
+ if !reflect.DeepEqual(Err.StackFrames(), result) {
+ t.Errorf("Wrong stack for %s: %#v", key, Err.StackFrames())
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go
new file mode 100644
index 00000000..4edadbc5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/errors/stackframe.go
@@ -0,0 +1,97 @@
+package errors
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "runtime"
+ "strings"
+)
+
+// A StackFrame contains all necessary information to generate a line
+// in a callstack.
+type StackFrame struct {
+ File string
+ LineNumber int
+ Name string
+ Package string
+ ProgramCounter uintptr
+}
+
+// NewStackFrame populates a stack frame object from the program counter.
+func NewStackFrame(pc uintptr) (frame StackFrame) {
+
+ frame = StackFrame{ProgramCounter: pc}
+ if frame.Func() == nil {
+ return
+ }
+ frame.Package, frame.Name = packageAndName(frame.Func())
+
+ // pc -1 because the program counters we use are usually return addresses,
+ // and we want to show the line that corresponds to the function call
+ frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1)
+ return
+
+}
+
+// Func returns the function that this stackframe corresponds to
+func (frame *StackFrame) Func() *runtime.Func {
+ if frame.ProgramCounter == 0 {
+ return nil
+ }
+ return runtime.FuncForPC(frame.ProgramCounter)
+}
+
+// String returns the stackframe formatted in the same way as Go does
+// in runtime/debug.Stack()
+func (frame *StackFrame) String() string {
+ str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter)
+
+ source, err := frame.SourceLine()
+ if err != nil {
+ return str
+ }
+
+ return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source)
+}
+
+// SourceLine gets the line of code (from File and LineNumber) of the original source if possible
+func (frame *StackFrame) SourceLine() (string, error) {
+ data, err := ioutil.ReadFile(frame.File)
+
+ if err != nil {
+ return "", err
+ }
+
+ lines := bytes.Split(data, []byte{'\n'})
+ if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) {
+ return "???", nil
+ }
+ // -1 because line-numbers are 1 based, but our array is 0 based
+ return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil
+}
+
+func packageAndName(fn *runtime.Func) (string, string) {
+ name := fn.Name()
+ pkg := ""
+
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+// Since the package path might contain dots (e.g. code.google.com/...),
+ // we first remove the path prefix if there is one.
+ if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
+ pkg += name[:lastslash] + "/"
+ name = name[lastslash+1:]
+ }
+ if period := strings.Index(name, "."); period >= 0 {
+ pkg += name[:period]
+ name = name[period+1:]
+ }
+
+ name = strings.Replace(name, "·", ".", -1)
+ return pkg, name
+}
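+
+// Illustrative sketch of the transformation above, using a hypothetical
+// symbol name (not one taken from this codebase):
+//
+//     fn.Name() == "github.com/acme/app/controllers.(*App).Index"
+//     pkg, name := packageAndName(fn)
+//     // pkg  == "github.com/acme/app/controllers"
+//     // name == "(*App).Index"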
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go
new file mode 100644
index 00000000..1586ef3f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/event.go
@@ -0,0 +1,134 @@
+package bugsnag
+
+import (
+ "strings"
+
+ "github.com/bugsnag/bugsnag-go/errors"
+)
+
+// Context is the context of the error in Bugsnag.
+// This can be passed to Notify, Recover or AutoNotify as rawData.
+type Context struct {
+ String string
+}
+
+// User represents the searchable user-data on Bugsnag. The Id is also used
+// to determine the number of users affected by a bug. This can be
+// passed to Notify, Recover or AutoNotify as rawData.
+type User struct {
+ Id string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Email string `json:"email,omitempty"`
+}
+
+// Sets the severity of the error on Bugsnag. These values can be
+// passed to Notify, Recover or AutoNotify as rawData.
+var (
+ SeverityError = severity{"error"}
+ SeverityWarning = severity{"warning"}
+ SeverityInfo = severity{"info"}
+)
+
+// The severity tag type, private so that people can only use Error, Warning, Info
+type severity struct {
+ String string
+}
+
+// The form of stacktrace that Bugsnag expects
+type stackFrame struct {
+ Method string `json:"method"`
+ File string `json:"file"`
+ LineNumber int `json:"lineNumber"`
+ InProject bool `json:"inProject,omitempty"`
+}
+
+// Event represents a payload of data that gets sent to Bugsnag.
+// This is passed to each OnBeforeNotify hook.
+type Event struct {
+
+ // The original error that caused this event, not sent to Bugsnag.
+ Error *errors.Error
+
+ // The rawData affecting this error, not sent to Bugsnag.
+ RawData []interface{}
+
+ // The error class to be sent to Bugsnag. This defaults to the type name of the Error, for
+ // example *error.String
+ ErrorClass string
+ // The error message to be sent to Bugsnag. This defaults to the return value of Error.Error()
+ Message string
+ // The stacktrace of the error to be sent to Bugsnag.
+ Stacktrace []stackFrame
+
+ // The context to be sent to Bugsnag. This should be set to the part of the app that was running,
+ // e.g. for http requests, set it to the path.
+ Context string
+ // The severity of the error. Can be SeverityError, SeverityWarning or SeverityInfo.
+ Severity severity
+ // The grouping hash is used to override Bugsnag's grouping. Set this if you'd like all errors with
+ // the same grouping hash to group together in the dashboard.
+ GroupingHash string
+
+ // User data to send to Bugsnag. This is searchable on the dashboard.
+ User *User
+ // Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard.
+ MetaData MetaData
+}
+
+func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) {
+
+ config := notifier.Config
+ event := &Event{
+ Error: err,
+ RawData: append(notifier.RawData, rawData...),
+
+ ErrorClass: err.TypeName(),
+ Message: err.Error(),
+ Stacktrace: make([]stackFrame, len(err.StackFrames())),
+
+ Severity: SeverityWarning,
+
+ MetaData: make(MetaData),
+ }
+
+ for _, datum := range event.RawData {
+ switch datum := datum.(type) {
+ case severity:
+ event.Severity = datum
+
+ case Context:
+ event.Context = datum.String
+
+ case Configuration:
+ config = config.merge(&datum)
+
+ case MetaData:
+ event.MetaData.Update(datum)
+
+ case User:
+ event.User = &datum
+ }
+ }
+
+ for i, frame := range err.StackFrames() {
+ file := frame.File
+ inProject := config.isProjectPackage(frame.Package)
+
+ // remove $GOROOT and $GOPATH from other frames
+ if idx := strings.Index(file, frame.Package); idx > -1 {
+ file = file[idx:]
+ }
+ if inProject {
+ file = config.stripProjectPackages(file)
+ }
+
+ event.Stacktrace[i] = stackFrame{
+ Method: frame.Name,
+ File: file,
+ LineNumber: frame.LineNumber,
+ InProject: inProject,
+ }
+ }
+
+ return event, config
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go
new file mode 100644
index 00000000..45be38fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/json_tags.go
@@ -0,0 +1,43 @@
+// The code is stripped from:
+// http://golang.org/src/pkg/encoding/json/tags.go?m=text
+
+package bugsnag
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the particular option flag optionName. optionName must be
+// surrounded by a string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
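+
+// Illustrative sketch of how the two helpers combine, using a hypothetical
+// struct tag:
+//
+//     name, opts := parseTag("email,omitempty")
+//     // name == "email"
+//     // opts.Contains("omitempty") == true
+//     // opts.Contains("string") == false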
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go
new file mode 100644
index 00000000..ffe64e21
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata.go
@@ -0,0 +1,185 @@
+package bugsnag
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// MetaData is added to the Bugsnag dashboard in tabs. Each tab is
+// a map of strings -> values. You can pass MetaData to Notify, Recover
+// and AutoNotify as rawData.
+type MetaData map[string]map[string]interface{}
+
+// Update the meta-data with more information. Tabs are merged together such
+// that unique keys from both sides are preserved, and duplicate keys end up
+// with the provided values.
+func (meta MetaData) Update(other MetaData) {
+ for name, tab := range other {
+
+ if meta[name] == nil {
+ meta[name] = make(map[string]interface{})
+ }
+
+ for key, value := range tab {
+ meta[name][key] = value
+ }
+ }
+}
+
+// Add creates a tab of Bugsnag meta-data.
+// If the tab doesn't yet exist it will be created.
+// If the key already exists, it will be overwritten.
+func (meta MetaData) Add(tab string, key string, value interface{}) {
+ if meta[tab] == nil {
+ meta[tab] = make(map[string]interface{})
+ }
+
+ meta[tab][key] = value
+}
+
+// AddStruct creates a tab of Bugsnag meta-data.
+// The struct will be converted to a map using the
+// reflect package, so any private fields will not be exported.
+// As a safety measure, if you pass a non-struct the value will be
+// sent to Bugsnag under the "Extra data" tab.
+func (meta MetaData) AddStruct(tab string, obj interface{}) {
+ val := sanitizer{}.Sanitize(obj)
+ content, ok := val.(map[string]interface{})
+ if ok {
+ meta[tab] = content
+ } else {
+ // Wasn't a struct
+ meta.Add("Extra data", tab, obj)
+ }
+
+}
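+
+// Illustrative usage sketch; the tab and key names below are hypothetical:
+//
+//     meta := make(MetaData)
+//     meta.Add("Request", "path", "/checkout")
+//     meta.AddStruct("Config", struct{ Debug bool }{true})
+//     // A non-struct value falls through to the "Extra data" tab:
+//     meta.AddStruct("note", "not a struct")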
+
+// Remove any values from meta-data that have keys matching the filters,
+// and any that are recursive data-structures
+func (meta MetaData) sanitize(filters []string) interface{} {
+ return sanitizer{
+ Filters: filters,
+ Seen: make([]interface{}, 0),
+ }.Sanitize(meta)
+
+}
+
+// The sanitizer is used to remove filtered params and recursion from meta-data.
+type sanitizer struct {
+ Filters []string
+ Seen []interface{}
+}
+
+func (s sanitizer) Sanitize(data interface{}) interface{} {
+ for _, s := range s.Seen {
+ // TODO: we don't need deep equal here, just type-ignoring equality
+ if reflect.DeepEqual(data, s) {
+ return "[RECURSION]"
+ }
+ }
+
+ // Sanitizers are passed by value, so we can modify s and it only affects
+ // s.Seen for nested calls.
+ s.Seen = append(s.Seen, data)
+
+ t := reflect.TypeOf(data)
+ v := reflect.ValueOf(data)
+
+ switch t.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64:
+ return data
+
+ case reflect.String:
+ return data
+
+ case reflect.Interface, reflect.Ptr:
+ return s.Sanitize(v.Elem().Interface())
+
+ case reflect.Array, reflect.Slice:
+ ret := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ ret[i] = s.Sanitize(v.Index(i).Interface())
+ }
+ return ret
+
+ case reflect.Map:
+ return s.sanitizeMap(v)
+
+ case reflect.Struct:
+ return s.sanitizeStruct(v, t)
+
+ // Things JSON can't serialize:
+ // case t.Chan, t.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+ default:
+ return "[" + t.String() + "]"
+
+ }
+
+}
+
+func (s sanitizer) sanitizeMap(v reflect.Value) interface{} {
+ ret := make(map[string]interface{})
+
+ for _, key := range v.MapKeys() {
+ val := s.Sanitize(v.MapIndex(key).Interface())
+ newKey := fmt.Sprintf("%v", key.Interface())
+
+ if s.shouldRedact(newKey) {
+ val = "[REDACTED]"
+ }
+
+ ret[newKey] = val
+ }
+
+ return ret
+}
+
+func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} {
+ ret := make(map[string]interface{})
+
+ for i := 0; i < v.NumField(); i++ {
+
+ val := v.Field(i)
+ // Don't export private fields
+ if !val.CanInterface() {
+ continue
+ }
+
+ name := t.Field(i).Name
+ var opts tagOptions
+
+ // Parse JSON tags. Supports name and "omitempty"
+ if jsonTag := t.Field(i).Tag.Get("json"); len(jsonTag) != 0 {
+ name, opts = parseTag(jsonTag)
+ }
+
+ if s.shouldRedact(name) {
+ ret[name] = "[REDACTED]"
+ } else {
+ sanitized := s.Sanitize(val.Interface())
+ if str, ok := sanitized.(string); ok {
+ if !(opts.Contains("omitempty") && len(str) == 0) {
+ ret[name] = str
+ }
+ } else {
+ ret[name] = sanitized
+ }
+
+ }
+ }
+
+ return ret
+}
+
+func (s sanitizer) shouldRedact(key string) bool {
+ for _, filter := range s.Filters {
+ if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go
new file mode 100644
index 00000000..37bfaee5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/metadata_test.go
@@ -0,0 +1,182 @@
+package bugsnag
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+
+ "github.com/bugsnag/bugsnag-go/errors"
+)
+
+type _account struct {
+ ID string
+ Name string
+ Plan struct {
+ Premium bool
+ }
+ Password string
+ secret string
+ Email string `json:"email"`
+ EmptyEmail string `json:"emptyemail,omitempty"`
+ NotEmptyEmail string `json:"not_empty_email,omitempty"`
+}
+
+type _broken struct {
+ Me *_broken
+ Data string
+}
+
+var account = _account{}
+var notifier = New(Configuration{})
+
+func TestMetaDataAdd(t *testing.T) {
+ m := MetaData{
+ "one": {
+ "key": "value",
+ "override": false,
+ }}
+
+ m.Add("one", "override", true)
+ m.Add("one", "new", "key")
+ m.Add("new", "tab", account)
+
+ m.AddStruct("lol", "not really a struct")
+ m.AddStruct("account", account)
+
+ if !reflect.DeepEqual(m, MetaData{
+ "one": {
+ "key": "value",
+ "override": true,
+ "new": "key",
+ },
+ "new": {
+ "tab": account,
+ },
+ "Extra data": {
+ "lol": "not really a struct",
+ },
+ "account": {
+ "ID": "",
+ "Name": "",
+ "Plan": map[string]interface{}{
+ "Premium": false,
+ },
+ "Password": "",
+ "email": "",
+ },
+ }) {
+ t.Errorf("metadata.Add didn't work: %#v", m)
+ }
+}
+
+func TestMetaDataUpdate(t *testing.T) {
+
+ m := MetaData{
+ "one": {
+ "key": "value",
+ "override": false,
+ }}
+
+ m.Update(MetaData{
+ "one": {
+ "override": true,
+ "new": "key",
+ },
+ "new": {
+ "tab": account,
+ },
+ })
+
+ if !reflect.DeepEqual(m, MetaData{
+ "one": {
+ "key": "value",
+ "override": true,
+ "new": "key",
+ },
+ "new": {
+ "tab": account,
+ },
+ }) {
+ t.Errorf("metadata.Update didn't work: %#v", m)
+ }
+}
+
+func TestMetaDataSanitize(t *testing.T) {
+
+ var broken = _broken{}
+ broken.Me = &broken
+ broken.Data = "ohai"
+ account.Name = "test"
+ account.ID = "test"
+ account.secret = "hush"
+ account.Email = "example@example.com"
+ account.EmptyEmail = ""
+ account.NotEmptyEmail = "not_empty_email@example.com"
+
+ m := MetaData{
+ "one": {
+ "bool": true,
+ "int": 7,
+ "float": 7.1,
+ "complex": complex(1, 1),
+ "func": func() {},
+ "unsafe": unsafe.Pointer(broken.Me),
+ "string": "string",
+ "password": "secret",
+ "array": []hash{{
+ "creditcard": "1234567812345678",
+ "broken": broken,
+ }},
+ "broken": broken,
+ "account": account,
+ },
+ }
+
+ n := m.sanitize([]string{"password", "creditcard"})
+
+ if !reflect.DeepEqual(n, map[string]interface{}{
+ "one": map[string]interface{}{
+ "bool": true,
+ "int": 7,
+ "float": 7.1,
+ "complex": "[complex128]",
+ "string": "string",
+ "unsafe": "[unsafe.Pointer]",
+ "func": "[func()]",
+ "password": "[REDACTED]",
+ "array": []interface{}{map[string]interface{}{
+ "creditcard": "[REDACTED]",
+ "broken": map[string]interface{}{
+ "Me": "[RECURSION]",
+ "Data": "ohai",
+ },
+ }},
+ "broken": map[string]interface{}{
+ "Me": "[RECURSION]",
+ "Data": "ohai",
+ },
+ "account": map[string]interface{}{
+ "ID": "test",
+ "Name": "test",
+ "Plan": map[string]interface{}{
+ "Premium": false,
+ },
+ "Password": "[REDACTED]",
+ "email": "example@example.com",
+ "not_empty_email": "not_empty_email@example.com",
+ },
+ },
+ }) {
+ t.Errorf("metadata.Sanitize didn't work: %#v", n)
+ }
+
+}
+
+func ExampleMetaData() {
+ notifier.Notify(errors.Errorf("hi world"),
+ MetaData{"Account": {
+ "id": account.ID,
+ "name": account.Name,
+ "paying?": account.Plan.Premium,
+ }})
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go
new file mode 100644
index 00000000..266d5e46
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware.go
@@ -0,0 +1,96 @@
+package bugsnag
+
+import (
+ "net/http"
+ "strings"
+)
+
+type (
+ beforeFunc func(*Event, *Configuration) error
+
+ // A middlewareStack keeps middleware in the correct order. Middleware
+ // are called in reverse order of addition, so a newly added middleware
+ // will be called before all existing middleware.
+ middlewareStack struct {
+ before []beforeFunc
+ }
+)
+
+// OnBeforeNotify adds a new middleware to the outside of the existing ones;
+// when the middlewareStack is Run, it will be run before all middleware that
+// were added before it.
+func (stack *middlewareStack) OnBeforeNotify(middleware beforeFunc) {
+ stack.before = append(stack.before, middleware)
+}
+
+// Run runs all the middleware in reverse order of addition. If every
+// middleware permits it, the next callback is then called.
+func (stack *middlewareStack) Run(event *Event, config *Configuration, next func() error) error {
+ // run all the before filters in reverse order
+ for i := range stack.before {
+ before := stack.before[len(stack.before)-i-1]
+
+ err := stack.runBeforeFilter(before, event, config)
+ if err != nil {
+ return err
+ }
+ }
+
+ return next()
+}
+
+func (stack *middlewareStack) runBeforeFilter(f beforeFunc, event *Event, config *Configuration) error {
+ defer func() {
+ if err := recover(); err != nil {
+ config.log("bugsnag/middleware: unexpected panic: %v", err)
+ }
+ }()
+
+ return f(event, config)
+}
+
+// catchMiddlewarePanic is used to log any panics that happen inside middleware;
+// we still want to notify Bugsnag in that case.
+func catchMiddlewarePanic(event *Event, config *Configuration, next func() error) {
+}
+
+// httpRequestMiddleware is added as an OnBeforeNotify hook by default. It takes
+// information from an http.Request passed in as rawData, and adds it to the
+// Event. You can use this as a template for writing your own middleware.
+func httpRequestMiddleware(event *Event, config *Configuration) error {
+ for _, datum := range event.RawData {
+ if request, ok := datum.(*http.Request); ok {
+ proto := "http://"
+ if request.TLS != nil {
+ proto = "https://"
+ }
+
+ event.MetaData.Update(MetaData{
+ "Request": {
+ "RemoteAddr": request.RemoteAddr,
+ "Method": request.Method,
+ "Url": proto + request.Host + request.RequestURI,
+ "Params": request.URL.Query(),
+ },
+ })
+
+ // Add headers as a separate tab.
+ event.MetaData.AddStruct("Headers", request.Header)
+
+ // Default context to Path
+ if event.Context == "" {
+ event.Context = request.URL.Path
+ }
+
+ // Default user.id to IP so that users-affected works.
+ if event.User == nil {
+ ip := request.RemoteAddr
+ if idx := strings.LastIndex(ip, ":"); idx != -1 {
+ ip = ip[:idx]
+ }
+ event.User = &User{Id: ip}
+ }
+ }
+ }
+ return nil
+}
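+
+// Illustrative sketch of a custom middleware following the template above;
+// the "Deploy" tab and its values are hypothetical:
+//
+//     OnBeforeNotify(func(event *Event, config *Configuration) error {
+//         event.MetaData.Add("Deploy", "gitSHA", "deadbeef")
+//         // Returning a non-nil error would cancel the notification.
+//         return nil
+//     })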
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go
new file mode 100644
index 00000000..b1ef77a8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/middleware_test.go
@@ -0,0 +1,88 @@
+package bugsnag
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "reflect"
+ "testing"
+)
+
+func TestMiddlewareOrder(t *testing.T) {
+
+ result := make([]int, 0, 7)
+ stack := middlewareStack{}
+ stack.OnBeforeNotify(func(e *Event, c *Configuration) error {
+ result = append(result, 2)
+ return nil
+ })
+ stack.OnBeforeNotify(func(e *Event, c *Configuration) error {
+ result = append(result, 1)
+ return nil
+ })
+ stack.OnBeforeNotify(func(e *Event, c *Configuration) error {
+ result = append(result, 0)
+ return nil
+ })
+
+ stack.Run(nil, nil, func() error {
+ result = append(result, 3)
+ return nil
+ })
+
+ if !reflect.DeepEqual(result, []int{0, 1, 2, 3}) {
+ t.Errorf("unexpected middleware order %v", result)
+ }
+}
+
+func TestBeforeNotifyReturnErr(t *testing.T) {
+
+ stack := middlewareStack{}
+ err := fmt.Errorf("test")
+
+ stack.OnBeforeNotify(func(e *Event, c *Configuration) error {
+ return err
+ })
+
+ called := false
+
+ e := stack.Run(nil, nil, func() error {
+ called = true
+ return nil
+ })
+
+ if e != err {
+ t.Errorf("Middleware didn't return the error")
+ }
+
+ if called == true {
+ t.Errorf("Notify was called when BeforeNotify returned False")
+ }
+}
+
+func TestBeforeNotifyPanic(t *testing.T) {
+
+ stack := middlewareStack{}
+
+ stack.OnBeforeNotify(func(e *Event, c *Configuration) error {
+ panic("oops")
+ })
+
+ called := false
+ b := &bytes.Buffer{}
+
+ stack.Run(nil, &Configuration{Logger: log.New(b, log.Prefix(), 0)}, func() error {
+ called = true
+ return nil
+ })
+
+ logged := b.String()
+
+ if logged != "bugsnag/middleware: unexpected panic: oops\n" {
+ t.Errorf("Logged: %s", logged)
+ }
+
+ if called == false {
+ t.Errorf("Notify was not called when BeforeNotify panicked")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go
new file mode 100644
index 00000000..6b108178
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/notifier.go
@@ -0,0 +1,95 @@
+package bugsnag
+
+import (
+ "fmt"
+
+ "github.com/bugsnag/bugsnag-go/errors"
+)
+
+// Notifier sends errors to Bugsnag.
+type Notifier struct {
+ Config *Configuration
+ RawData []interface{}
+}
+
+// New creates a new notifier.
+// You can pass an instance of bugsnag.Configuration in rawData to change the configuration.
+// Other values of rawData will be passed to Notify.
+func New(rawData ...interface{}) *Notifier {
+ config := Config.clone()
+ for i, datum := range rawData {
+ if c, ok := datum.(Configuration); ok {
+ config.update(&c)
+ rawData[i] = nil
+ }
+ }
+
+ return &Notifier{
+ Config: config,
+ RawData: rawData,
+ }
+}
+
+// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to
+// Bugsnag after being converted to JSON, e.g. bugsnag.SeverityError,
+// bugsnag.Context, or bugsnag.MetaData.
+func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) {
+ event, config := newEvent(errors.New(err, 1), rawData, notifier)
+
+ // Never block, start throwing away errors if we have too many.
+ e = middleware.Run(event, config, func() error {
+ config.log("notifying bugsnag: %s", event.Message)
+ if config.notifyInReleaseStage() {
+ if config.Synchronous {
+ return (&payload{event, config}).deliver()
+ }
+ go (&payload{event, config}).deliver()
+ return nil
+ }
+ return fmt.Errorf("not notifying in %s", config.ReleaseStage)
+ })
+
+ if e != nil {
+ config.log("bugsnag.Notify: %v", e)
+ }
+ return e
+}
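+
+// Illustrative usage sketch; the API key, error, and metadata values below
+// are hypothetical:
+//
+//     notifier := New(Configuration{APIKey: "0123456789abcdef0123456789abcdef"})
+//     notifier.Notify(fmt.Errorf("something broke"),
+//         Context{String: "/checkout"},
+//         SeverityInfo,
+//         MetaData{"Order": {"id": 1234}})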
+
+// AutoNotify notifies Bugsnag of any panics, then repanics.
+// It sends along any rawData that gets passed in.
+// Usage: defer AutoNotify()
+func (notifier *Notifier) AutoNotify(rawData ...interface{}) {
+ if err := recover(); err != nil {
+ rawData = notifier.addDefaultSeverity(rawData, SeverityError)
+ notifier.Notify(errors.New(err, 2), rawData...)
+ panic(err)
+ }
+}
+
+// Recover logs any panics, then recovers.
+// It sends along any rawData that gets passed in.
+// Usage: defer Recover()
+func (notifier *Notifier) Recover(rawData ...interface{}) {
+ if err := recover(); err != nil {
+ rawData = notifier.addDefaultSeverity(rawData, SeverityWarning)
+ notifier.Notify(errors.New(err, 2), rawData...)
+ }
+}
+
+func (notifier *Notifier) dontPanic() {
+ if err := recover(); err != nil {
+ notifier.Config.log("bugsnag/notifier.Notify: panic! %s", err)
+ }
+}
+
+// Add a severity to raw data only if the default is not set.
+func (notifier *Notifier) addDefaultSeverity(rawData []interface{}, s severity) []interface{} {
+
+ for _, datum := range append(notifier.RawData, rawData...) {
+ if _, ok := datum.(severity); ok {
+ return rawData
+ }
+ }
+
+ return append(rawData, s)
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go
new file mode 100644
index 00000000..14fb9fa8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap.go
@@ -0,0 +1,27 @@
+// +build !appengine
+
+package bugsnag
+
+import (
+ "github.com/bugsnag/panicwrap"
+ "github.com/bugsnag/bugsnag-go/errors"
+)
+
+// NOTE: this function does not return when you call it; instead it
+// re-exec()s the current process with panic monitoring.
+func defaultPanicHandler() {
+ defer defaultNotifier.dontPanic()
+
+ err := panicwrap.BasicMonitor(func(output string) {
+ toNotify, err := errors.ParsePanic(output)
+
+ if err != nil {
+ defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err)
+ }
+ Notify(toNotify, SeverityError, Configuration{Synchronous: true})
+ })
+
+ if err != nil {
+ defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go
new file mode 100644
index 00000000..247c3f45
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/panicwrap_test.go
@@ -0,0 +1,79 @@
+// +build !appengine
+
+package bugsnag
+
+import (
+ "github.com/bitly/go-simplejson"
+ "github.com/mitchellh/osext"
+ "os"
+ "os/exec"
+ "testing"
+ "time"
+)
+
+func TestPanicHandler(t *testing.T) {
+ startTestServer()
+
+ exePath, err := osext.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Use the same trick as panicwrap() to re-run ourselves.
+ // In the init() block below, we will then panic.
+ cmd := exec.Command(exePath, os.Args[1:]...)
+ cmd.Env = append(os.Environ(), "BUGSNAG_API_KEY="+testAPIKey, "BUGSNAG_ENDPOINT="+testEndpoint, "please_panic=please_panic")
+
+ if err = cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = cmd.Wait(); err.Error() != "exit status 2" {
+ t.Fatal(err)
+ }
+
+ json, err := simplejson.NewJson(<-postedJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ event := json.Get("events").GetIndex(0)
+
+ if event.Get("severity").MustString() != "error" {
+ t.Errorf("severity should be error")
+ }
+ exception := event.Get("exceptions").GetIndex(0)
+
+ if exception.Get("message").MustString() != "ruh roh" {
+ t.Errorf("caught wrong panic")
+ }
+
+ if exception.Get("errorClass").MustString() != "panic" {
+ t.Errorf("caught wrong panic")
+ }
+
+ frame := exception.Get("stacktrace").GetIndex(1)
+
+ // Yeah, we just caught a panic from the init() function below and sent it to the server running above (mindblown)
+ if frame.Get("inProject").MustBool() != true ||
+ frame.Get("file").MustString() != "panicwrap_test.go" ||
+ frame.Get("method").MustString() != "panick" ||
+ frame.Get("lineNumber").MustInt() == 0 {
+ t.Errorf("stack trace seemed wrong")
+ }
+}
+
+func init() {
+ if os.Getenv("please_panic") != "" {
+ Configure(Configuration{APIKey: os.Getenv("BUGSNAG_API_KEY"), Endpoint: os.Getenv("BUGSNAG_ENDPOINT"), ProjectPackages: []string{"github.com/bugsnag/bugsnag-go"}})
+ go func() {
+ panick()
+ }()
+ // Plenty of time to crash, it shouldn't need any of it.
+ time.Sleep(1 * time.Second)
+ }
+}
+
+func panick() {
+ panic("ruh roh")
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go
new file mode 100644
index 00000000..a516a5d2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/payload.go
@@ -0,0 +1,96 @@
+package bugsnag
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+type payload struct {
+ *Event
+ *Configuration
+}
+
+type hash map[string]interface{}
+
+func (p *payload) deliver() error {
+
+ if len(p.APIKey) != 32 {
+ return fmt.Errorf("bugsnag/payload.deliver: invalid api key")
+ }
+
+ buf, err := json.Marshal(p)
+
+ if err != nil {
+ return fmt.Errorf("bugsnag/payload.deliver: %v", err)
+ }
+
+ client := http.Client{
+ Transport: p.Transport,
+ }
+
+ resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf))
+
+ if err != nil {
+ return fmt.Errorf("bugsnag/payload.deliver: %v", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status)
+ }
+
+ return nil
+}
+
+func (p *payload) MarshalJSON() ([]byte, error) {
+
+ data := hash{
+ "apiKey": p.APIKey,
+
+ "notifier": hash{
+ "name": "Bugsnag Go",
+ "url": "https://github.com/bugsnag/bugsnag-go",
+ "version": VERSION,
+ },
+
+ "events": []hash{
+ {
+ "payloadVersion": "2",
+ "exceptions": []hash{
+ {
+ "errorClass": p.ErrorClass,
+ "message": p.Message,
+ "stacktrace": p.Stacktrace,
+ },
+ },
+ "severity": p.Severity.String,
+ "app": hash{
+ "releaseStage": p.ReleaseStage,
+ },
+ "user": p.User,
+ "metaData": p.MetaData.sanitize(p.ParamsFilters),
+ },
+ },
+ }
+
+ event := data["events"].([]hash)[0]
+
+ if p.Context != "" {
+ event["context"] = p.Context
+ }
+ if p.GroupingHash != "" {
+ event["groupingHash"] = p.GroupingHash
+ }
+ if p.Hostname != "" {
+ event["device"] = hash{
+ "hostname": p.Hostname,
+ }
+ }
+ if p.AppVersion != "" {
+ event["app"].(hash)["version"] = p.AppVersion
+ }
+ return json.Marshal(data)
+
+}
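+
+// Illustrative sketch of the resulting wire format (values hypothetical);
+// context, groupingHash, device, and app.version are added only when set:
+//
+//     {
+//       "apiKey": "...",
+//       "notifier": {"name": "Bugsnag Go", "url": "...", "version": "..."},
+//       "events": [{
+//         "payloadVersion": "2",
+//         "exceptions": [{"errorClass": "panic", "message": "hello!", "stacktrace": [...]}],
+//         "severity": "error",
+//         "app": {"releaseStage": "production"},
+//         "user": null,
+//         "metaData": {}
+//       }]
+//     }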
diff --git a/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go
new file mode 100644
index 00000000..149b010c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/bugsnag-go/revel/bugsnagrevel.go
@@ -0,0 +1,60 @@
+// Package bugsnagrevel adds Bugsnag to revel.
+// It lets you pass *revel.Controller into bugsnag.Notify(),
+// and provides a Filter to catch errors.
+package bugsnagrevel
+
+import (
+ "strings"
+ "sync"
+
+ "github.com/bugsnag/bugsnag-go"
+ "github.com/revel/revel"
+)
+
+var once sync.Once
+
+// Filter should be added to the filter chain just after the PanicFilter.
+// It sends errors to Bugsnag automatically. Configuration is read out of
+// conf/app.conf, you should set bugsnag.apikey, and can also set
+// bugsnag.endpoint, bugsnag.releasestage, bugsnag.appversion,
+// bugsnag.projectroot, bugsnag.projectpackages if needed.
+func Filter(c *revel.Controller, fc []revel.Filter) {
+ defer bugsnag.AutoNotify(c)
+ fc[0](c, fc[1:])
+}
+
+// Add support to bugsnag for reading data out of *revel.Controllers
+func middleware(event *bugsnag.Event, config *bugsnag.Configuration) error {
+ for _, datum := range event.RawData {
+ if controller, ok := datum.(*revel.Controller); ok {
+ // make the request visible to the built-in HTTP middleware
+ event.RawData = append(event.RawData, controller.Request.Request)
+ event.Context = controller.Action
+ event.MetaData.AddStruct("Session", controller.Session)
+ }
+ }
+
+ return nil
+}
+
+func init() {
+ revel.OnAppStart(func() {
+ bugsnag.OnBeforeNotify(middleware)
+
+ var projectPackages []string
+ if packages, ok := revel.Config.String("bugsnag.projectpackages"); ok {
+ projectPackages = strings.Split(packages, ",")
+ } else {
+ projectPackages = []string{revel.ImportPath + "/app/*", revel.ImportPath + "/app"}
+ }
+
+ bugsnag.Configure(bugsnag.Configuration{
+ APIKey: revel.Config.StringDefault("bugsnag.apikey", ""),
+ Endpoint: revel.Config.StringDefault("bugsnag.endpoint", ""),
+ AppVersion: revel.Config.StringDefault("bugsnag.appversion", ""),
+ ReleaseStage: revel.Config.StringDefault("bugsnag.releasestage", revel.RunMode),
+ ProjectPackages: projectPackages,
+ Logger: revel.ERROR,
+ })
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE b/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE
new file mode 100644
index 00000000..18527a28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2012 Daniel Theophanes
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go
new file mode 100644
index 00000000..37efbb22
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extensions to the standard "os" package.
+package osext
+
+import "path/filepath"
+
+// Executable returns an absolute path that can be used to
+// re-invoke the current program.
+// It may not be valid after the current program exits.
+func Executable() (string, error) {
+ p, err := executable()
+ return filepath.Clean(p), err
+}
+
+// ExecutableFolder returns the same path as Executable, but keeps just the
+// folder path and excludes the executable name.
+func ExecutableFolder() (string, error) {
+ p, err := Executable()
+ if err != nil {
+ return "", err
+ }
+ folder, _ := filepath.Split(p)
+ return folder, nil
+}
+
+// Deprecated: use Executable() instead.
+func GetExePath() (exePath string, err error) {
+ return Executable()
+}
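+
+// Illustrative usage sketch, mirroring the callers elsewhere in this tree
+// that re-invoke the current program:
+//
+//     exePath, err := osext.Executable()
+//     if err != nil {
+//         // handle the lookup failure
+//     }
+//     cmd := exec.Command(exePath, os.Args[1:]...)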
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go
new file mode 100644
index 00000000..e88c1e09
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_plan9.go
@@ -0,0 +1,16 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import "syscall"
+
+func executable() (string, error) {
+ f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go
new file mode 100644
index 00000000..546fec91
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_procfs.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux netbsd openbsd
+
+package osext
+
+import (
+ "errors"
+ "os"
+ "runtime"
+)
+
+func executable() (string, error) {
+ switch runtime.GOOS {
+ case "linux":
+ return os.Readlink("/proc/self/exe")
+ case "netbsd":
+ return os.Readlink("/proc/curproc/exe")
+ case "openbsd":
+ return os.Readlink("/proc/curproc/file")
+ }
+ return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go
new file mode 100644
index 00000000..d7646462
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_sysctl.go
@@ -0,0 +1,64 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd
+
+package osext
+
+import (
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+var startUpcwd, getwdError = os.Getwd()
+
+func executable() (string, error) {
+ var mib [4]int32
+ switch runtime.GOOS {
+ case "freebsd":
+ mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
+ case "darwin":
+ mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
+ }
+
+ n := uintptr(0)
+ // get length
+ _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+ if err != 0 {
+ return "", err
+ }
+ if n == 0 { // shouldn't happen
+ return "", nil
+ }
+ buf := make([]byte, n)
+ _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+ if err != 0 {
+ return "", err
+ }
+ if n == 0 { // shouldn't happen
+ return "", nil
+ }
+ for i, v := range buf {
+ if v == 0 {
+ buf = buf[:i]
+ break
+ }
+ }
+ if buf[0] != '/' {
+ if getwdError != nil {
+ return string(buf), getwdError
+ } else {
+ if buf[0] == '.' {
+ buf = buf[1:]
+ }
+ if startUpcwd[len(startUpcwd)-1] != '/' {
+ return startUpcwd + "/" + string(buf), nil
+ }
+ return startUpcwd + string(buf), nil
+ }
+ }
+ return string(buf), nil
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go
new file mode 100644
index 00000000..dc661dbc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin linux freebsd netbsd windows
+
+package osext
+
+import (
+ "fmt"
+ "os"
+ oexec "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
+
+func TestExecPath(t *testing.T) {
+ ep, err := Executable()
+ if err != nil {
+ t.Fatalf("ExecPath failed: %v", err)
+ }
+ // we want fn to be of the form "dir/prog"
+ dir := filepath.Dir(filepath.Dir(ep))
+ fn, err := filepath.Rel(dir, ep)
+ if err != nil {
+ t.Fatalf("filepath.Rel: %v", err)
+ }
+ cmd := &oexec.Cmd{}
+ // make child start with a relative program path
+ cmd.Dir = dir
+ cmd.Path = fn
+ // forge argv[0] for child, so that we can verify we could correctly
+ // get real path of the executable without influenced by argv[0].
+ cmd.Args = []string{"-", "-test.run=XXXX"}
+ cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("exec(self) failed: %v", err)
+ }
+ outs := string(out)
+ if !filepath.IsAbs(outs) {
+ t.Fatalf("Child returned %q, want an absolute path", out)
+ }
+ if !sameFile(outs, ep) {
+ t.Fatalf("Child returned %q, not the same file as %q", out, ep)
+ }
+}
+
+func sameFile(fn1, fn2 string) bool {
+ fi1, err := os.Stat(fn1)
+ if err != nil {
+ return false
+ }
+ fi2, err := os.Stat(fn2)
+ if err != nil {
+ return false
+ }
+ return os.SameFile(fi1, fi2)
+}
+
+func init() {
+ if e := os.Getenv(execPath_EnvVar); e != "" {
+ // first chdir to another path
+ dir := "/"
+ if runtime.GOOS == "windows" {
+ dir = filepath.VolumeName(".")
+ }
+ os.Chdir(dir)
+ if ep, err := Executable(); err != nil {
+ fmt.Fprint(os.Stderr, "ERROR: ", err)
+ } else {
+ fmt.Fprint(os.Stderr, ep)
+ }
+ os.Exit(0)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go
new file mode 100644
index 00000000..72d282cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/osext/osext_windows.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
+)
+
+// GetModuleFileName() with hModule = NULL
+func executable() (exePath string, err error) {
+ return getModuleFileName()
+}
+
+func getModuleFileName() (string, error) {
+ var n uint32
+ b := make([]uint16, syscall.MAX_PATH)
+ size := uint32(len(b))
+
+ r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ return "", e1
+ }
+ return string(utf16.Decode(b[0:n])), nil
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE
new file mode 100644
index 00000000..f9c841a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md
new file mode 100644
index 00000000..d0a59675
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/README.md
@@ -0,0 +1,101 @@
+# panicwrap
+
+panicwrap is a Go library that re-executes a Go binary and monitors stderr
+output from the binary for a panic. When it finds a panic, it executes a
+user-defined handler function. Stdout, stderr, stdin, signals, and exit
+codes continue to work as normal, making the existence of panicwrap mostly
+invisible to the end user until a panic actually occurs.
+
+Since a panic is truly a bug in the program and is meant to crash the runtime,
+globally catching panics within Go applications is not supposed to be possible.
+Despite this, it is often useful to have a way to know when panics occur.
+panicwrap allows you to do something with these panics, such as writing them
+to a file, so that you can track when panics occur.
+
+panicwrap is ***not a panic recovery system***. Panics indicate serious
+problems with your application and _should_ crash the runtime. panicwrap
+is just meant as a way to monitor for panics. If you still think this is
+the worst idea ever, read the section below on why.
+
+## Features
+
+* **SIMPLE!**
+* Works with all Go applications on all platforms Go supports
+* Custom behavior when a panic occurs
+* Stdout, stderr, stdin, exit codes, and signals continue to work as
+ expected.
+
+## Usage
+
+Using panicwrap is simple. It behaves a lot like `fork`, if you know
+how that works. A basic example is shown below.
+
+Because it would be sad to panic while capturing a panic, it is recommended
+that the handler functions for panicwrap remain relatively simple and well
+tested. panicwrap itself contains many tests.
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mitchellh/panicwrap"
+ "os"
+)
+
+func main() {
+ exitStatus, err := panicwrap.BasicWrap(panicHandler)
+ if err != nil {
+ // Something went wrong setting up the panic wrapper. Unlikely,
+ // but possible.
+ panic(err)
+ }
+
+ // If exitStatus >= 0, then we're the parent process and the panicwrap
+ // re-executed ourselves and completed. Just exit with the proper status.
+ if exitStatus >= 0 {
+ os.Exit(exitStatus)
+ }
+
+ // Otherwise, exitStatus < 0 means we're the child. Continue executing as
+ // normal...
+
+ // Let's say we panic
+ panic("oh shucks")
+}
+
+func panicHandler(output string) {
+ // output contains the full output (including stack traces) of the
+ // panic. Put it in a file or something.
+ fmt.Printf("The child panicked:\n\n%s\n", output)
+ os.Exit(1)
+}
+```
+
+## How Does it Work?
+
+panicwrap works by re-executing the running program (retaining arguments,
+environmental variables, etc.) and monitoring the stderr of the program.
+Since Go always outputs panics in a predictable way with a predictable
+exit code, panicwrap is able to reliably detect panics and allow the parent
+process to handle them.
+
+## WHY?! Panics should CRASH!
+
+Yes, panics _should_ crash. They are 100% always indicative of bugs.
+However, in some cases, such as user-facing programs (programs like
+[Packer](http://github.com/mitchellh/packer) or
+[Docker](http://github.com/dotcloud/docker)), it is up to the user to
+report such panics. This is unreliable, at best, and it would be better if the
+program could have a way to automatically report panics. panicwrap provides
+a way to do this.
+
+For backend applications, it is easier to detect crashes (since the application
+exits). However, it is still sometimes nice to log panics more intelligently.
+For example, at [HashiCorp](http://www.hashicorp.com),
+we use panicwrap to log panics to timestamped files with some additional
+data (configuration settings at the time, environmental variables, etc.).
+
+The goal of panicwrap is _not_ to hide panics. It is instead to provide
+a clean mechanism for handling them before they bubble up to the user
+and ultimately crash the program.
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go
new file mode 100644
index 00000000..1c64a546
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor.go
@@ -0,0 +1,63 @@
+// +build !windows
+
+package panicwrap
+
+import (
+ "github.com/bugsnag/osext"
+ "os"
+ "os/exec"
+ "syscall"
+)
+
+func monitor(c *WrapConfig) (int, error) {
+
+ // If we're the child process, absorb panics.
+ if Wrapped(c) {
+ panicCh := make(chan string)
+
+ go trackPanic(os.Stdin, os.Stderr, c.DetectDuration, panicCh)
+
+ // Wait on the panic data
+ panicTxt := <-panicCh
+ if panicTxt != "" {
+ if !c.HidePanic {
+ os.Stderr.Write([]byte(panicTxt))
+ }
+
+ c.Handler(panicTxt)
+ }
+
+ os.Exit(0)
+ }
+
+ exePath, err := osext.Executable()
+ if err != nil {
+ return -1, err
+ }
+ cmd := exec.Command(exePath, os.Args[1:]...)
+
+ read, write, err := os.Pipe()
+ if err != nil {
+ return -1, err
+ }
+
+ cmd.Stdin = read
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue)
+
+ if err != nil {
+ return -1, err
+ }
+ err = cmd.Start()
+ if err != nil {
+ return -1, err
+ }
+
+ err = syscall.Dup2(int(write.Fd()), int(os.Stderr.Fd()))
+ if err != nil {
+ return -1, err
+ }
+
+ return -1, nil
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go
new file mode 100644
index 00000000..d07a6921
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/monitor_windows.go
@@ -0,0 +1,7 @@
+package panicwrap
+
+import "fmt"
+
+func monitor(c *WrapConfig) (int, error) {
+ return -1, fmt.Errorf("Monitor is not supported on windows")
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go
new file mode 100644
index 00000000..f9ea3e3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap.go
@@ -0,0 +1,339 @@
+// The panicwrap package provides functions for capturing and handling
+// panics in your application. It does this by re-executing the running
+// application and monitoring stderr for any panics. At the same time,
+// stdout/stderr/etc. are set to the same values so that data is shuttled
+// through properly, making the existence of panicwrap mostly transparent.
+//
+// Panics are only detected when the subprocess exits with a non-zero
+// exit status, since this is the only time panics are real. Otherwise,
+// "panic-like" output is ignored.
+package panicwrap
+
+import (
+ "bytes"
+ "errors"
+ "github.com/bugsnag/osext"
+ "io"
+ "os"
+ "os/exec"
+ "os/signal"
+ "runtime"
+ "syscall"
+ "time"
+)
+
+const (
+ DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531"
+ DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750"
+)
+
+// HandlerFunc is the type called when a panic is detected.
+type HandlerFunc func(string)
+
+// WrapConfig is the configuration for panicwrap when wrapping an existing
+// binary. To get started, in general, you only need the BasicWrap function
+// that will set this up for you. However, for more customizability,
+// WrapConfig and Wrap can be used.
+type WrapConfig struct {
+ // Handler is the function called when a panic occurs.
+ Handler HandlerFunc
+
+ // The cookie key and value are used within environmental variables
+ // to tell the child process that it is already executing so that
+ // wrap doesn't re-wrap itself.
+ CookieKey string
+ CookieValue string
+
+ // If true, the panic will not be mirrored to the configured writer
+ // and will instead ONLY go to the handler. This lets you effectively
+ // hide panics from the end user. This is not recommended because if
+ // your handler fails, the panic is effectively lost.
+ HidePanic bool
+
+ // If true, panicwrap will boot a monitor sub-process and let the parent
+ // run the app. This mode is useful for processes run under supervisors
+ // like runit, as signals get sent to the correct process. This is not
+ // supported when GOOS=windows, and ignores c.Stderr and c.Stdout.
+ Monitor bool
+
+ // The amount of time that a process must exit within after detecting
+ // a panic header for panicwrap to assume it is a panic. Defaults to
+ // 300 milliseconds.
+ DetectDuration time.Duration
+
+ // The writer to send the stderr to. If this is nil, then it defaults
+ // to os.Stderr.
+ Writer io.Writer
+
+ // The writer to send stdout to. If this is nil, then it defaults to
+ // os.Stdout.
+ Stdout io.Writer
+}
+
+// BasicWrap calls Wrap with the given handler function, using defaults
+// for everything else. See Wrap and WrapConfig for more information on
+// functionality and return values.
+func BasicWrap(f HandlerFunc) (int, error) {
+ return Wrap(&WrapConfig{
+ Handler: f,
+ })
+}
+
+// BasicMonitor calls Wrap with Monitor set to true on supported platforms.
+// It forks your program and runs it again from the start. In one process
+// BasicMonitor never returns: it just listens on the stderr of the other
+// process and calls your handler when a panic is seen. In the other it either
+// returns nil to indicate that panic monitoring is enabled, or an error to
+// indicate that something went wrong.
+func BasicMonitor(f HandlerFunc) error {
+ exitStatus, err := Wrap(&WrapConfig{
+ Handler: f,
+ Monitor: runtime.GOOS != "windows",
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if exitStatus >= 0 {
+ os.Exit(exitStatus)
+ }
+
+ return nil
+}
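+
+// Illustrative usage sketch: BasicMonitor is typically called once, very
+// early in main. The log destination here is hypothetical.
+//
+//     if err := panicwrap.BasicMonitor(func(output string) {
+//         log.Printf("captured panic:\n%s", output)
+//     }); err != nil {
+//         // monitoring could not be set up; decide whether to continue
+//     }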
+
+// Wrap wraps the current executable in a handler to catch panics. It
+// returns an error if there was an error during the wrapping process.
+// If the error is nil, then the int result indicates the exit status of the
+// child process. If the exit status is -1, then this is the child process,
+// and execution should continue as normal. Otherwise, this is the parent
+// process and the child successfully ran already, and you should exit the
+// process with the returned exit status.
+//
+// This function should be called very very early in your program's execution.
+// Ideally, this runs as the first line of code of main.
+//
+// Once this is called, the given WrapConfig shouldn't be modified or used
+// any further.
+func Wrap(c *WrapConfig) (int, error) {
+ if c.Handler == nil {
+ return -1, errors.New("Handler must be set")
+ }
+
+ if c.DetectDuration == 0 {
+ c.DetectDuration = 300 * time.Millisecond
+ }
+
+ if c.Writer == nil {
+ c.Writer = os.Stderr
+ }
+
+ if c.Monitor {
+ return monitor(c)
+ } else {
+ return wrap(c)
+ }
+}
+
+func wrap(c *WrapConfig) (int, error) {
+
+ // If we're already wrapped, exit out.
+ if Wrapped(c) {
+ return -1, nil
+ }
+
+ // Get the path to our current executable
+ exePath, err := osext.Executable()
+ if err != nil {
+ return -1, err
+ }
+
+ // Pipe the stderr so we can read all the data as we look for panics
+ stderr_r, stderr_w := io.Pipe()
+
+ // doneCh is closed when we're done, signaling any other goroutines
+ // to end immediately.
+ doneCh := make(chan struct{})
+
+ // panicCh is the channel on which the panic text will actually be
+ // sent.
+ panicCh := make(chan string)
+
+ // On close, make sure to finish off the copying of data to stderr
+ defer func() {
+ defer close(doneCh)
+ stderr_w.Close()
+ <-panicCh
+ }()
+
+ // Start the goroutine that will watch stderr for any panics
+ go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh)
+
+ // Create the writer for stdout that we're going to use
+ var stdout_w io.Writer = os.Stdout
+ if c.Stdout != nil {
+ stdout_w = c.Stdout
+ }
+
+ // Build a subcommand to re-execute ourselves. We make sure to
+ // set the environmental variable to include our cookie. We also
+ // set stdin/stdout to match the config. Finally, we pipe stderr
+ // through ourselves in order to watch for panics.
+ cmd := exec.Command(exePath, os.Args[1:]...)
+ cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = stdout_w
+ cmd.Stderr = stderr_w
+ if err := cmd.Start(); err != nil {
+ return 1, err
+ }
+
+ // Listen to signals and capture them forever. We allow the child
+ // process to handle them in some way.
+ sigCh := make(chan os.Signal)
+ signal.Notify(sigCh, os.Interrupt)
+ go func() {
+ defer signal.Stop(sigCh)
+ for {
+ select {
+ case <-doneCh:
+ return
+ case <-sigCh:
+ }
+ }
+ }()
+
+ if err := cmd.Wait(); err != nil {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // This is some other kind of subprocessing error.
+ return 1, err
+ }
+
+ exitStatus := 1
+ if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
+ exitStatus = status.ExitStatus()
+ }
+
+ // Close the writer end so that the tracker goroutine ends at some point
+ stderr_w.Close()
+
+ // Wait on the panic data
+ panicTxt := <-panicCh
+ if panicTxt != "" {
+ if !c.HidePanic {
+ c.Writer.Write([]byte(panicTxt))
+ }
+
+ c.Handler(panicTxt)
+ }
+
+ return exitStatus, nil
+ }
+
+ return 0, nil
+}
+
+// Wrapped checks if we're already wrapped according to the configuration
+// given.
+//
+// Wrapped is very cheap and can be used early to short-circuit some pre-wrap
+// logic your application may have.
+func Wrapped(c *WrapConfig) bool {
+ if c.CookieKey == "" {
+ c.CookieKey = DEFAULT_COOKIE_KEY
+ }
+
+ if c.CookieValue == "" {
+ c.CookieValue = DEFAULT_COOKIE_VAL
+ }
+
+ // If the cookie key/value match our environment, then we are the
+ // child, so just exit now and tell the caller that we're the child
+ return os.Getenv(c.CookieKey) == c.CookieValue
+}
+
+// trackPanic monitors the given reader for a panic. If a panic is detected,
+// it is outputted on the result channel. This will close the channel once
+// it is complete.
+func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) {
+ defer close(result)
+
+ var panicTimer <-chan time.Time
+ panicBuf := new(bytes.Buffer)
+ panicHeader := []byte("panic:")
+
+ tempBuf := make([]byte, 2048)
+ for {
+ var buf []byte
+ var n int
+
+ if panicTimer == nil && panicBuf.Len() > 0 {
+ // We're not tracking a panic but the buffer length is
+ // greater than 0. We need to clear out that buffer, but
+ // look for another panic along the way.
+
+ // First, remove the previous panic header so we don't loop
+ w.Write(panicBuf.Next(len(panicHeader)))
+
+ // Next, assume that this is our new buffer to inspect
+ n = panicBuf.Len()
+ buf = make([]byte, n)
+ copy(buf, panicBuf.Bytes())
+ panicBuf.Reset()
+ } else {
+ var err error
+ buf = tempBuf
+ n, err = r.Read(buf)
+ if n <= 0 && err == io.EOF {
+ if panicBuf.Len() > 0 {
+ // We were tracking a panic, assume it was a panic
+ // and return that as the result.
+ result <- panicBuf.String()
+ }
+
+ return
+ }
+ }
+
+ if panicTimer != nil {
+ // We're tracking what we think is a panic right now.
+ // If the timer ended, then it is not a panic.
+ isPanic := true
+ select {
+ case <-panicTimer:
+ isPanic = false
+ default:
+ }
+
+ // No matter what, buffer the text some more.
+ panicBuf.Write(buf[0:n])
+
+ if !isPanic {
+ // It isn't a panic, stop tracking. Clean-up will happen
+ // on the next iteration.
+ panicTimer = nil
+ }
+
+ continue
+ }
+
+ flushIdx := n
+ idx := bytes.Index(buf[0:n], panicHeader)
+ if idx >= 0 {
+ flushIdx = idx
+ }
+
+ // Flush to stderr what isn't a panic
+ w.Write(buf[0:flushIdx])
+
+ if idx < 0 {
+ // Not a panic so just continue along
+ continue
+ }
+
+ // We have a panic header. Buffer what we assume is a panic so far.
+ panicBuf.Write(buf[idx:n])
+ panicTimer = time.After(dur)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go
new file mode 100644
index 00000000..dd1d77f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/bugsnag/panicwrap/panicwrap_test.go
@@ -0,0 +1,360 @@
+package panicwrap
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+ "time"
+)
+
+func helperProcess(s ...string) *exec.Cmd {
+ cs := []string{"-test.run=TestHelperProcess", "--"}
+ cs = append(cs, s...)
+ env := []string{
+ "GO_WANT_HELPER_PROCESS=1",
+ }
+
+ cmd := exec.Command(os.Args[0], cs...)
+ cmd.Env = append(env, os.Environ()...)
+ cmd.Stdin = os.Stdin
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stdout
+ return cmd
+}
+
+// This is executed by `helperProcess` in a separate process in order to
+// provide a proper sub-process environment to test some of our functionality.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+
+ // Find the arguments to our helper, which are the arguments past
+ // the "--" in the command line.
+ args := os.Args
+ for len(args) > 0 {
+ if args[0] == "--" {
+ args = args[1:]
+ break
+ }
+
+ args = args[1:]
+ }
+
+ if len(args) == 0 {
+ fmt.Fprintf(os.Stderr, "No command\n")
+ os.Exit(2)
+ }
+
+ panicHandler := func(s string) {
+ fmt.Fprintf(os.Stdout, "wrapped: %d", len(s))
+ os.Exit(0)
+ }
+
+ cmd, args := args[0], args[1:]
+ switch cmd {
+ case "no-panic-ordered-output":
+ exitStatus, err := BasicWrap(panicHandler)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus < 0 {
+ for i := 0; i < 1000; i++ {
+ os.Stdout.Write([]byte("a"))
+ os.Stderr.Write([]byte("b"))
+ }
+ os.Exit(0)
+ }
+
+ os.Exit(exitStatus)
+ case "no-panic-output":
+ fmt.Fprint(os.Stdout, "i am output")
+ fmt.Fprint(os.Stderr, "stderr out")
+ os.Exit(0)
+ case "panic-boundary":
+ exitStatus, err := BasicWrap(panicHandler)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus < 0 {
+ // Simulate a panic but on two boundaries...
+ fmt.Fprint(os.Stderr, "pan")
+ os.Stderr.Sync()
+ time.Sleep(100 * time.Millisecond)
+ fmt.Fprint(os.Stderr, "ic: oh crap")
+ os.Exit(2)
+ }
+
+ os.Exit(exitStatus)
+ case "panic-long":
+ exitStatus, err := BasicWrap(panicHandler)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus < 0 {
+ // Make a fake panic by faking the header and adding a
+ // bunch of garbage.
+ fmt.Fprint(os.Stderr, "panic: foo\n\n")
+ for i := 0; i < 1024; i++ {
+ fmt.Fprint(os.Stderr, "foobarbaz")
+ }
+
+ // Sleep so that it dumps the previous data
+ //time.Sleep(1 * time.Millisecond)
+ time.Sleep(500 * time.Millisecond)
+
+ // Make a real panic
+ panic("I AM REAL!")
+ }
+
+ os.Exit(exitStatus)
+ case "panic":
+ hidePanic := false
+ if args[0] == "hide" {
+ hidePanic = true
+ }
+
+ config := &WrapConfig{
+ Handler: panicHandler,
+ HidePanic: hidePanic,
+ }
+
+ exitStatus, err := Wrap(config)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus < 0 {
+ panic("uh oh")
+ }
+
+ os.Exit(exitStatus)
+ case "wrapped":
+ child := false
+ if len(args) > 0 && args[0] == "child" {
+ child = true
+ }
+ config := &WrapConfig{
+ Handler: panicHandler,
+ }
+
+ exitStatus, err := Wrap(config)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus < 0 {
+ if child {
+ fmt.Printf("%v", Wrapped(config))
+ }
+ os.Exit(0)
+ }
+
+ if !child {
+ fmt.Printf("%v", Wrapped(config))
+ }
+ os.Exit(exitStatus)
+ case "panic-monitor":
+
+ config := &WrapConfig{
+ Handler: panicHandler,
+ HidePanic: true,
+ Monitor: true,
+ }
+
+ exitStatus, err := Wrap(config)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wrap error: %s", err)
+ os.Exit(1)
+ }
+
+ if exitStatus != -1 {
+ fmt.Fprintf(os.Stderr, "wrap error: unexpected exit status %d", exitStatus)
+ os.Exit(1)
+ }
+
+ panic("uh oh")
+
+ default:
+ fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd)
+ os.Exit(2)
+ }
+}
+
+func TestPanicWrap_Output(t *testing.T) {
+ stderr := new(bytes.Buffer)
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("no-panic-output")
+ p.Stdout = stdout
+ p.Stderr = stderr
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "i am output") {
+ t.Fatalf("didn't forward: %#v", stdout.String())
+ }
+
+ if !strings.Contains(stderr.String(), "stderr out") {
+ t.Fatalf("didn't forward: %#v", stderr.String())
+ }
+}
+
+/*
+TODO(mitchellh): This property would be nice to gain.
+func TestPanicWrap_Output_Order(t *testing.T) {
+ output := new(bytes.Buffer)
+
+ p := helperProcess("no-panic-ordered-output")
+ p.Stdout = output
+ p.Stderr = output
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expectedBuf := new(bytes.Buffer)
+ for i := 0; i < 1000; i++ {
+ expectedBuf.WriteString("ab")
+ }
+
+ actual := strings.TrimSpace(output.String())
+ expected := strings.TrimSpace(expectedBuf.String())
+
+ if actual != expected {
+ t.Fatalf("bad: %#v", actual)
+ }
+}
+*/
+
+func TestPanicWrap_panicHide(t *testing.T) {
+ stdout := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+
+ p := helperProcess("panic", "hide")
+ p.Stdout = stdout
+ p.Stderr = stderr
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "wrapped:") {
+ t.Fatalf("didn't wrap: %#v", stdout.String())
+ }
+
+ if strings.Contains(stderr.String(), "panic:") {
+ t.Fatalf("shouldn't have panic: %#v", stderr.String())
+ }
+}
+
+func TestPanicWrap_panicShow(t *testing.T) {
+ stdout := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+
+ p := helperProcess("panic", "show")
+ p.Stdout = stdout
+ p.Stderr = stderr
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "wrapped:") {
+ t.Fatalf("didn't wrap: %#v", stdout.String())
+ }
+
+ if !strings.Contains(stderr.String(), "panic:") {
+ t.Fatalf("should have panic: %#v", stderr.String())
+ }
+}
+
+func TestPanicWrap_panicLong(t *testing.T) {
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("panic-long")
+ p.Stdout = stdout
+ p.Stderr = new(bytes.Buffer)
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "wrapped:") {
+ t.Fatalf("didn't wrap: %#v", stdout.String())
+ }
+}
+
+func TestPanicWrap_panicBoundary(t *testing.T) {
+ // TODO(mitchellh): panics are currently lost on boundaries
+ t.SkipNow()
+
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("panic-boundary")
+ p.Stdout = stdout
+ //p.Stderr = new(bytes.Buffer)
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "wrapped: 1015") {
+ t.Fatalf("didn't wrap: %#v", stdout.String())
+ }
+}
+
+func TestPanicWrap_monitor(t *testing.T) {
+
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("panic-monitor")
+ p.Stdout = stdout
+ //p.Stderr = new(bytes.Buffer)
+ if err := p.Run(); err == nil || err.Error() != "exit status 2" {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "wrapped:") {
+ t.Fatalf("didn't wrap: %#v", stdout.String())
+ }
+}
+
+func TestWrapped(t *testing.T) {
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("wrapped", "child")
+ p.Stdout = stdout
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "true") {
+ t.Fatalf("bad: %#v", stdout.String())
+ }
+}
+
+func TestWrapped_parent(t *testing.T) {
+ stdout := new(bytes.Buffer)
+
+ p := helperProcess("wrapped")
+ p.Stdout = stdout
+ if err := p.Run(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !strings.Contains(stdout.String(), "false") {
+ t.Fatalf("bad: %#v", stdout.String())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt.go
new file mode 100644
index 00000000..c0654f5d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt.go
@@ -0,0 +1,74 @@
+package aws
+
+import (
+ "time"
+)
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully. This is an internal type used by the
+// implementation of other goamz packages.
+type AttemptStrategy struct {
+ Total time.Duration // total duration of attempt.
+ Delay time.Duration // interval between each try in the burst.
+ Min int // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+ strategy AttemptStrategy
+ last time.Time
+ end time.Time
+ force bool
+ count int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+ now := time.Now()
+ return &Attempt{
+ strategy: s,
+ last: now,
+ end: now.Add(s.Total),
+ force: true,
+ }
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+func (a *Attempt) Next() bool {
+ now := time.Now()
+ sleep := a.nextSleep(now)
+ if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+ return false
+ }
+ a.force = false
+ if sleep > 0 && a.count > 0 {
+ time.Sleep(sleep)
+ now = time.Now()
+ }
+ a.count++
+ a.last = now
+ return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+ sleep := a.strategy.Delay - now.Sub(a.last)
+ if sleep < 0 {
+ return 0
+ }
+ return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+ if a.force || a.strategy.Min > a.count {
+ return true
+ }
+ now := time.Now()
+ if now.Add(a.nextSleep(now)).Before(a.end) {
+ a.force = true
+ return true
+ }
+ return false
+}
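+
+// Usage sketch (illustrative): retry a fallible operation for up to 5
+// seconds, pausing 200ms between tries. attemptOp is a hypothetical
+// function returning an error.
+//
+//	strategy := AttemptStrategy{
+//		Total: 5 * time.Second,
+//		Delay: 200 * time.Millisecond,
+//	}
+//	var err error
+//	for a := strategy.Start(); a.Next(); {
+//		if err = attemptOp(); err == nil {
+//			break
+//		}
+//	}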
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt_test.go
new file mode 100644
index 00000000..c83b185e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/attempt_test.go
@@ -0,0 +1,57 @@
+package aws_test
+
+import (
+ "github.com/crowdmob/goamz/aws"
+ "gopkg.in/check.v1"
+ "time"
+)
+
+func (S) TestAttemptTiming(c *check.C) {
+ testAttempt := aws.AttemptStrategy{
+ Total: 0.25e9,
+ Delay: 0.1e9,
+ }
+ want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
+ got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
+ t0 := time.Now()
+ for a := testAttempt.Start(); a.Next(); {
+ got = append(got, time.Now().Sub(t0))
+ }
+ got = append(got, time.Now().Sub(t0))
+ c.Assert(got, check.HasLen, len(want))
+ const margin = 0.01e9
+ for i, got := range want {
+ lo := want[i] - margin
+ hi := want[i] + margin
+ if got < lo || got > hi {
+ c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
+ }
+ }
+}
+
+func (S) TestAttemptNextHasNext(c *check.C) {
+ a := aws.AttemptStrategy{}.Start()
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.Next(), check.Equals, false)
+
+ a = aws.AttemptStrategy{}.Start()
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.HasNext(), check.Equals, false)
+ c.Assert(a.Next(), check.Equals, false)
+
+ a = aws.AttemptStrategy{Total: 2e8}.Start()
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.HasNext(), check.Equals, true)
+ time.Sleep(2e8)
+ c.Assert(a.HasNext(), check.Equals, true)
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.Next(), check.Equals, false)
+
+ a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
+ time.Sleep(1e8)
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.HasNext(), check.Equals, true)
+ c.Assert(a.Next(), check.Equals, true)
+ c.Assert(a.HasNext(), check.Equals, false)
+ c.Assert(a.Next(), check.Equals, false)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws.go
new file mode 100644
index 00000000..89be74b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws.go
@@ -0,0 +1,616 @@
+//
+// goamz - Go packages to interact with the Amazon Web Services.
+//
+// https://wiki.ubuntu.com/goamz
+//
+// Copyright (c) 2011 Canonical Ltd.
+//
+// Written by Gustavo Niemeyer
+//
+package aws
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "os/user"
+ "path"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// Regular expressions for INI files
+var (
+ iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`)
+ iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`)
+)
+
+// Defines the valid signers
+const (
+ V2Signature = iota
+ V4Signature = iota
+ Route53Signature = iota
+)
+
+// Defines the service endpoint and correct Signer implementation to use
+// to sign requests for this endpoint
+type ServiceInfo struct {
+ Endpoint string
+ Signer uint
+}
+
+// Region defines the URLs where AWS services may be accessed.
+//
+// See http://goo.gl/d8BP1 for more details.
+type Region struct {
+ Name string // the canonical name of this region.
+ EC2Endpoint string
+ S3Endpoint string
+ S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
+ S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
+ S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
+ SDBEndpoint string
+ SNSEndpoint string
+ SQSEndpoint string
+ SESEndpoint string
+ IAMEndpoint string
+ ELBEndpoint string
+ DynamoDBEndpoint string
+ CloudWatchServicepoint ServiceInfo
+ AutoScalingEndpoint string
+ RDSEndpoint ServiceInfo
+ KinesisEndpoint string
+ STSEndpoint string
+ CloudFormationEndpoint string
+ ElastiCacheEndpoint string
+}
+
+var Regions = map[string]Region{
+ APNortheast.Name: APNortheast,
+ APSoutheast.Name: APSoutheast,
+ APSoutheast2.Name: APSoutheast2,
+ EUCentral.Name: EUCentral,
+ EUWest.Name: EUWest,
+ USEast.Name: USEast,
+ USWest.Name: USWest,
+ USWest2.Name: USWest2,
+ USGovWest.Name: USGovWest,
+ SAEast.Name: SAEast,
+}
+
+// Designates a signer interface suitable for signing AWS requests; params
+// should be appropriately encoded for the request before signing.
+//
+// A signer should be initialized with Auth and the appropriate endpoint.
+type Signer interface {
+ Sign(method, path string, params map[string]string)
+}
+
+// An AWS Service interface with the API to query the AWS service
+//
+// Supplied as an easy way to mock out service calls during testing.
+type AWSService interface {
+ // Queries the AWS service at a given method/path with the params and
+ // returns an http.Response and error
+ Query(method, path string, params map[string]string) (*http.Response, error)
+ // Builds an error given an XML payload in the http.Response, can be used
+ // to process an error if the status code is not 200 for example.
+ BuildError(r *http.Response) error
+}
+
+// Implements a Server Query/Post API to easily query AWS services and build
+// errors when desired
+type Service struct {
+ service ServiceInfo
+ signer Signer
+}
+
+// Create a base set of params for an action
+func MakeParams(action string) map[string]string {
+ params := make(map[string]string)
+ params["Action"] = action
+ return params
+}
+
+// NewService creates a new AWS service client to handle making requests
+func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
+ var signer Signer
+ switch service.Signer {
+ case V2Signature:
+ signer, err = NewV2Signer(auth, service)
+ // case V4Signature:
+ // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
+ default:
+ err = fmt.Errorf("Unsupported signer for service")
+ }
+ if err != nil {
+ return
+ }
+ s = &Service{service: service, signer: signer}
+ return
+}
+
+func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
+ params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
+ u, err := url.Parse(s.service.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = path
+
+ s.signer.Sign(method, path, params)
+ if method == "GET" {
+ u.RawQuery = multimap(params).Encode()
+ resp, err = http.Get(u.String())
+ } else if method == "POST" {
+ resp, err = http.PostForm(u.String(), multimap(params))
+ }
+
+ return
+}
+
+func (s *Service) BuildError(r *http.Response) error {
+ errors := ErrorResponse{}
+ xml.NewDecoder(r.Body).Decode(&errors)
+ var err Error
+ err = errors.Errors
+ err.RequestId = errors.RequestId
+ err.StatusCode = r.StatusCode
+ if err.Message == "" {
+ err.Message = r.Status
+ }
+ return &err
+}
+
+type ServiceError interface {
+ error
+ ErrorCode() string
+}
+
+type ErrorResponse struct {
+ Errors Error `xml:"Error"`
+ RequestId string // A unique ID for tracking the request
+}
+
+type Error struct {
+ StatusCode int
+ Type string
+ Code string
+ Message string
+ RequestId string
+}
+
+func (err *Error) Error() string {
+ return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
+ err.Type, err.Code, err.Message,
+ )
+}
+
+func (err *Error) ErrorCode() string {
+ return err.Code
+}
+
+type Auth struct {
+ AccessKey, SecretKey string
+ token string
+ expiration time.Time
+}
+
+func (a *Auth) Token() string {
+ if a.token == "" {
+ return ""
+ }
+ if time.Since(a.expiration) >= -30*time.Second { // in an ideal world this should be zero, assuming the instance is syncing its clock
+ *a, _ = GetAuth("", "", "", time.Time{})
+ }
+ return a.token
+}
+
+func (a *Auth) Expiration() time.Time {
+ return a.expiration
+}
+
+// To be used with other APIs that return auth credentials such as STS
+func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
+ return &Auth{
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ token: token,
+ expiration: expiration,
+ }
+}
+
+// ResponseMetadata
+type ResponseMetadata struct {
+ RequestId string // A unique ID for tracking the request
+}
+
+type BaseResponse struct {
+ ResponseMetadata ResponseMetadata
+}
+
+var unreserved = make([]bool, 128)
+var hex = "0123456789ABCDEF"
+
+func init() {
+ // RFC3986
+ u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
+ for _, c := range u {
+ unreserved[c] = true
+ }
+}
+
+func multimap(p map[string]string) url.Values {
+ q := make(url.Values, len(p))
+ for k, v := range p {
+ q[k] = []string{v}
+ }
+ return q
+}
+
+type credentials struct {
+ Code string
+ LastUpdated string
+ Type string
+ AccessKeyId string
+ SecretAccessKey string
+ Token string
+ Expiration string
+}
+
+// GetMetaData retrieves instance metadata about the current machine.
+//
+// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
+func GetMetaData(path string) (contents []byte, err error) {
+ c := http.Client{
+ Transport: &http.Transport{
+ Dial: func(netw, addr string) (net.Conn, error) {
+ deadline := time.Now().Add(5 * time.Second)
+ c, err := net.DialTimeout(netw, addr, time.Second*2)
+ if err != nil {
+ return nil, err
+ }
+ c.SetDeadline(deadline)
+ return c, nil
+ },
+ },
+ }
+
+ url := "http://169.254.169.254/latest/meta-data/" + path
+
+ resp, err := c.Get(url)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
+ return
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return
+ }
+ return body, err
+}
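+
+// For example (a sketch; this only succeeds on an EC2 instance, where the
+// metadata service is reachable):
+//
+//	id, err := GetMetaData("instance-id")
+//	if err == nil {
+//		fmt.Printf("instance: %s\n", id)
+//	}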
+
+func GetRegion(regionName string) (region Region) {
+ region = Regions[regionName]
+ return
+}
+
+// GetInstanceCredentials creates an Auth based on the instance's role credentials.
+// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned.
+// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func GetInstanceCredentials() (cred credentials, err error) {
+ credentialPath := "iam/security-credentials/"
+
+ // Get the instance role
+ role, err := GetMetaData(credentialPath)
+ if err != nil {
+ return
+ }
+
+ // Get the instance role credentials
+ credentialJSON, err := GetMetaData(credentialPath + string(role))
+ if err != nil {
+ return
+ }
+
+ err = json.Unmarshal([]byte(credentialJSON), &cred)
+ return
+}
+
+// GetAuth creates an Auth from, in order: explicitly passed credentials,
+// environment variables, instance role credentials, or the shared
+// credentials file.
+func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
+ // First try passed in credentials
+ if accessKey != "" && secretKey != "" {
+ return Auth{accessKey, secretKey, token, expiration}, nil
+ }
+
+ // Next try to get auth from the environment
+ auth, err = EnvAuth()
+ if err == nil {
+ // Found auth, return
+ return
+ }
+
+ // Next try getting auth from the instance role
+ cred, err := GetInstanceCredentials()
+ if err == nil {
+ // Found auth, return
+ auth.AccessKey = cred.AccessKeyId
+ auth.SecretKey = cred.SecretAccessKey
+ auth.token = cred.Token
+ exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
+ if err != nil {
+ err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err)
+ }
+ auth.expiration = exptdate
+ return auth, err
+ }
+
+ // Next try getting auth from the credentials file
+ auth, err = CredentialFileAuth("", "", time.Minute*5)
+ if err == nil {
+ return
+ }
+
+ //err = errors.New("No valid AWS authentication found")
+ err = fmt.Errorf("No valid AWS authentication found: %s", err)
+ return auth, err
+}
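+
+// For example (sketch): passing empty credentials and a zero time.Time
+// makes GetAuth fall through to the environment, the instance role, and
+// finally the credentials file:
+//
+//	auth, err := GetAuth("", "", "", time.Time{})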
+
+// EnvAuth creates an Auth based on environment information.
+// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
+// variables are used.
+func EnvAuth() (auth Auth, err error) {
+ auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
+ if auth.AccessKey == "" {
+ auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if auth.SecretKey == "" {
+ auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
+ }
+ if auth.AccessKey == "" {
+ err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
+ }
+ if auth.SecretKey == "" {
+ err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
+ }
+ return
+}
+
+// CredentialFileAuth creates an Auth based on a credentials file. The file
+// contains various authentication profiles for use with AWS.
+//
+// The credentials file, which is used by other AWS SDKs, is documented at
+// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
+func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) {
+ if profile == "" {
+ profile = "default"
+ }
+
+ if filePath == "" {
+ u, err := user.Current()
+ if err != nil {
+ return auth, err
+ }
+
+ filePath = path.Join(u.HomeDir, ".aws", "credentials")
+ }
+
+ // read the file, then parse the INI
+ contents, err := ioutil.ReadFile(filePath)
+ if err != nil {
+ return
+ }
+
+ profiles := parseINI(string(contents))
+ profileData, ok := profiles[profile]
+
+ if !ok {
+ err = errors.New("The credentials file did not contain the profile")
+ return
+ }
+
+ keyId, ok := profileData["aws_access_key_id"]
+ if !ok {
+ err = errors.New("The credentials file did not contain required attribute aws_access_key_id")
+ return
+ }
+
+ secretKey, ok := profileData["aws_secret_access_key"]
+ if !ok {
+ err = errors.New("The credentials file did not contain required attribute aws_secret_access_key")
+ return
+ }
+
+ auth.AccessKey = keyId
+ auth.SecretKey = secretKey
+
+ if token, ok := profileData["aws_session_token"]; ok {
+ auth.token = token
+ }
+
+ auth.expiration = time.Now().Add(expiration)
+
+ return
+}
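+
+// For example (sketch): load the "default" profile from the standard
+// ~/.aws/credentials location and treat it as valid for five minutes:
+//
+//	auth, err := CredentialFileAuth("", "default", 5*time.Minute)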
+
+// parseINI takes the contents of a credentials file and returns a map whose
+// keys are the profile names and whose values are maps of the settings for
+// those profiles.
+func parseINI(fileContents string) map[string]map[string]string {
+ profiles := make(map[string]map[string]string)
+
+ lines := strings.Split(fileContents, "\n")
+
+ var currentSection map[string]string
+ for _, line := range lines {
+ // remove comments, which start with a semi-colon
+ if split := strings.Split(line, ";"); len(split) > 1 {
+ line = split[0]
+ }
+
+ // check if the line is the start of a profile.
+ //
+ // for example:
+ // [default]
+ //
+ // otherwise, check for the proper setting
+ // property=value
+ if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 {
+ currentSection = make(map[string]string)
+ profiles[sectMatch[1]] = currentSection
+ } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil {
+ currentSection[setMatch[1]] = setMatch[2]
+ }
+ }
+
+ return profiles
+}
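+
+// For example (illustrative), given the contents
+//
+//	[default]
+//	aws_access_key_id = AKID ; trailing comment
+//
+// parseINI returns
+//
+//	map[string]map[string]string{
+//		"default": {"aws_access_key_id": "AKID"},
+//	}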
+
+// Encode takes a string and URI-encodes it in a way suitable
+// to be used in AWS signatures.
+func Encode(s string) string {
+ encode := false
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ if c > 127 || !unreserved[c] {
+ encode = true
+ break
+ }
+ }
+ if !encode {
+ return s
+ }
+ e := make([]byte, len(s)*3)
+ ei := 0
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ if c > 127 || !unreserved[c] {
+ e[ei] = '%'
+ e[ei+1] = hex[c>>4]
+ e[ei+2] = hex[c&0xF]
+ ei += 3
+ } else {
+ e[ei] = c
+ ei += 1
+ }
+ }
+ return string(e[:ei])
+}
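+
+// For example, Encode("foo") returns "foo" unchanged, while Encode("/")
+// returns "%2F" (see the accompanying tests).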
+
+func dialTimeout(network, addr string) (net.Conn, error) {
+ return net.DialTimeout(network, addr, time.Duration(2*time.Second))
+}
+
+func InstanceRegion() string {
+ transport := http.Transport{Dial: dialTimeout}
+ client := http.Client{
+ Transport: &transport,
+ }
+ resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
+ if err != nil {
+ return "unknown"
+ } else {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "unknown"
+ } else {
+ b := string(body)
+ region := b[:len(b)-1]
+ return region
+ }
+ }
+}
+
+func InstanceId() string {
+ transport := http.Transport{Dial: dialTimeout}
+ client := http.Client{
+ Transport: &transport,
+ }
+ resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id")
+ if err != nil {
+ return "unknown"
+ } else {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "unknown"
+ } else {
+ return string(body)
+ }
+ }
+}
+
+func InstanceType() string {
+ transport := http.Transport{Dial: dialTimeout}
+ client := http.Client{
+ Transport: &transport,
+ }
+ resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type")
+ if err != nil {
+ return "unknown"
+ } else {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "unknown"
+ } else {
+ return string(body)
+ }
+ }
+}
+
+func ServerLocalIp() string {
+ transport := http.Transport{Dial: dialTimeout}
+ client := http.Client{
+ Transport: &transport,
+ }
+ resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4")
+ if err != nil {
+ return "127.0.0.1"
+ } else {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "127.0.0.1"
+ } else {
+ return string(body)
+ }
+ }
+}
+
+func ServerPublicIp() string {
+ transport := http.Transport{Dial: dialTimeout}
+ client := http.Client{
+ Transport: &transport,
+ }
+ resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4")
+ if err != nil {
+ return "127.0.0.1"
+ } else {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "127.0.0.1"
+ } else {
+ return string(body)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws_test.go
new file mode 100644
index 00000000..e1e68674
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/aws_test.go
@@ -0,0 +1,140 @@
+package aws_test
+
+import (
+ "github.com/crowdmob/goamz/aws"
+ "gopkg.in/check.v1"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+ "time"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&S{})
+
+type S struct {
+ environ []string
+}
+
+func (s *S) SetUpSuite(c *check.C) {
+ s.environ = os.Environ()
+}
+
+func (s *S) TearDownTest(c *check.C) {
+ os.Clearenv()
+ for _, kv := range s.environ {
+ l := strings.SplitN(kv, "=", 2)
+ os.Setenv(l[0], l[1])
+ }
+}
+
+func (s *S) TestEnvAuthNoSecret(c *check.C) {
+ os.Clearenv()
+ _, err := aws.EnvAuth()
+ c.Assert(err, check.ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
+}
+
+func (s *S) TestEnvAuthNoAccess(c *check.C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
+ _, err := aws.EnvAuth()
+ c.Assert(err, check.ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
+}
+
+func (s *S) TestEnvAuth(c *check.C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ auth, err := aws.EnvAuth()
+ c.Assert(err, check.IsNil)
+ c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEnvAuthAlt(c *check.C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY", "access")
+ auth, err := aws.EnvAuth()
+ c.Assert(err, check.IsNil)
+ c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestGetAuthStatic(c *check.C) {
+ exptdate := time.Now().Add(time.Hour)
+ auth, err := aws.GetAuth("access", "secret", "token", exptdate)
+ c.Assert(err, check.IsNil)
+ c.Assert(auth.AccessKey, check.Equals, "access")
+ c.Assert(auth.SecretKey, check.Equals, "secret")
+ c.Assert(auth.Token(), check.Equals, "token")
+ c.Assert(auth.Expiration(), check.Equals, exptdate)
+}
+
+func (s *S) TestGetAuthEnv(c *check.C) {
+ os.Clearenv()
+ os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
+ os.Setenv("AWS_ACCESS_KEY_ID", "access")
+ auth, err := aws.GetAuth("", "", "", time.Time{})
+ c.Assert(err, check.IsNil)
+ c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
+}
+
+func (s *S) TestEncode(c *check.C) {
+ c.Assert(aws.Encode("foo"), check.Equals, "foo")
+ c.Assert(aws.Encode("/"), check.Equals, "%2F")
+}
+
+func (s *S) TestRegionsAreNamed(c *check.C) {
+ for n, r := range aws.Regions {
+ c.Assert(n, check.Equals, r.Name)
+ }
+}
+
+func (s *S) TestCredentialsFileAuth(c *check.C) {
+ file, err := ioutil.TempFile("", "creds")
+
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ iniFile := `
+
+[default] ; comment 123
+aws_access_key_id = keyid1 ;comment
+aws_secret_access_key=key1
+
+ [profile2]
+ aws_access_key_id = keyid2 ;comment
+ aws_secret_access_key=key2
+ aws_session_token=token1
+
+`
+ _, err = file.WriteString(iniFile)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ err = file.Close()
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ // check non-existent profile
+ _, err = aws.CredentialFileAuth(file.Name(), "no profile", 30*time.Minute)
+ c.Assert(err, check.Not(check.Equals), nil)
+
+ defaultProfile, err := aws.CredentialFileAuth(file.Name(), "default", 30*time.Minute)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(defaultProfile.AccessKey, check.Equals, "keyid1")
+ c.Assert(defaultProfile.SecretKey, check.Equals, "key1")
+ c.Assert(defaultProfile.Token(), check.Equals, "")
+
+ profile2, err := aws.CredentialFileAuth(file.Name(), "profile2", 30*time.Minute)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(profile2.AccessKey, check.Equals, "keyid2")
+ c.Assert(profile2.SecretKey, check.Equals, "key2")
+ c.Assert(profile2.Token(), check.Equals, "token1")
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/client.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/client.go
new file mode 100644
index 00000000..86d2ccec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/client.go
@@ -0,0 +1,124 @@
+package aws
+
+import (
+ "math"
+ "net"
+ "net/http"
+ "time"
+)
+
+type RetryableFunc func(*http.Request, *http.Response, error) bool
+type WaitFunc func(try int)
+type DeadlineFunc func() time.Time
+
+type ResilientTransport struct {
+ // Timeout is the maximum amount of time a dial will wait for
+ // a connect to complete.
+ //
+ // The default is no timeout.
+ //
+ // With or without a timeout, the operating system may impose
+ // its own earlier timeout. For instance, TCP timeouts are
+ // often around 3 minutes.
+ DialTimeout time.Duration
+
+ // MaxTries, if non-zero, specifies the number of times we will retry on
+ // failure. Retries are only attempted for temporary network errors or known
+ // safe failures.
+ MaxTries int
+ Deadline DeadlineFunc
+ ShouldRetry RetryableFunc
+ Wait WaitFunc
+ transport *http.Transport
+}
+
+// NewClient is a convenience function for creating an http.Client backed by
+// the given ResilientTransport.
+func NewClient(rt *ResilientTransport) *http.Client {
+ rt.transport = &http.Transport{
+ Dial: func(netw, addr string) (net.Conn, error) {
+ c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
+ if err != nil {
+ return nil, err
+ }
+ c.SetDeadline(rt.Deadline())
+ return c, nil
+ },
+ Proxy: http.ProxyFromEnvironment,
+ }
+ // TODO: Would be nice if ResilientTransport allowed clients to initialize
+ // with http.Transport attributes.
+ return &http.Client{
+ Transport: rt,
+ }
+}
+
+var retryingTransport = &ResilientTransport{
+ Deadline: func() time.Time {
+ return time.Now().Add(5 * time.Second)
+ },
+ DialTimeout: 10 * time.Second,
+ MaxTries: 3,
+ ShouldRetry: awsRetry,
+ Wait: ExpBackoff,
+}
+
+// Exported default client
+var RetryingClient = NewClient(retryingTransport)
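+
+// Usage sketch: RetryingClient is a drop-in *http.Client, so transient
+// network errors and 5xx responses are retried transparently (the URL is
+// illustrative):
+//
+//	resp, err := RetryingClient.Get("https://example.com/health")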
+
+func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.tries(req)
+}
+
+// Retry a request a maximum of t.MaxTries times.
+// We'll only retry if the proper criteria are met.
+// If a wait function is specified, wait that amount of time
+// in between requests.
+func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
+ for try := 0; try < t.MaxTries; try += 1 {
+ res, err = t.transport.RoundTrip(req)
+
+ if !t.ShouldRetry(req, res, err) {
+ break
+ }
+ if res != nil {
+ res.Body.Close()
+ }
+ if t.Wait != nil {
+ t.Wait(try)
+ }
+ }
+
+ return
+}
+
+func ExpBackoff(try int) {
+ time.Sleep(100 * time.Millisecond *
+ time.Duration(math.Exp2(float64(try))))
+}
+
+func LinearBackoff(try int) {
+ time.Sleep(time.Duration(try*100) * time.Millisecond)
+}
+
+// Decide if we should retry a request.
+// In general, the criteria for retrying a request are described here
+// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
+func awsRetry(req *http.Request, res *http.Response, err error) bool {
+ retry := false
+
+ // Retry if there's a temporary network error.
+ if neterr, ok := err.(net.Error); ok {
+ if neterr.Temporary() {
+ retry = true
+ }
+ }
+
+ // Retry if we get a 5xx series error.
+ if res != nil {
+ if res.StatusCode >= 500 && res.StatusCode < 600 {
+ retry = true
+ }
+ }
+
+ return retry
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/export_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/export_test.go
new file mode 100644
index 00000000..5f4a9dd0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/export_test.go
@@ -0,0 +1,29 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+)
+
+// V4Signer:
+// Exporting methods for testing
+
+func (s *V4Signer) RequestTime(req *http.Request) time.Time {
+ return s.requestTime(req)
+}
+
+func (s *V4Signer) CanonicalRequest(req *http.Request) string {
+ return s.canonicalRequest(req, "")
+}
+
+func (s *V4Signer) StringToSign(t time.Time, creq string) string {
+ return s.stringToSign(t, creq)
+}
+
+func (s *V4Signer) Signature(t time.Time, sts string) string {
+ return s.signature(t, sts)
+}
+
+func (s *V4Signer) Authorization(header http.Header, t time.Time, signature string) string {
+ return s.authorization(header, t, signature)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/regions.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/regions.go
new file mode 100644
index 00000000..97e12e1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/regions.go
@@ -0,0 +1,231 @@
+package aws
+
+var USGovWest = Region{
+ "us-gov-west-1",
+ "https://ec2.us-gov-west-1.amazonaws.com",
+ "https://s3-fips-us-gov-west-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "",
+ "https://sns.us-gov-west-1.amazonaws.com",
+ "https://sqs.us-gov-west-1.amazonaws.com",
+ "",
+ "https://iam.us-gov.amazonaws.com",
+ "https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
+ "https://dynamodb.us-gov-west-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
+ "https://autoscaling.us-gov-west-1.amazonaws.com",
+ ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
+ "",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.us-gov-west-1.amazonaws.com",
+ "",
+}
+
+var USEast = Region{
+ "us-east-1",
+ "https://ec2.us-east-1.amazonaws.com",
+ "https://s3.amazonaws.com",
+ "",
+ false,
+ false,
+ "https://sdb.amazonaws.com",
+ "https://sns.us-east-1.amazonaws.com",
+ "https://sqs.us-east-1.amazonaws.com",
+ "https://email.us-east-1.amazonaws.com",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.us-east-1.amazonaws.com",
+ "https://dynamodb.us-east-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
+ "https://autoscaling.us-east-1.amazonaws.com",
+ ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
+ "https://kinesis.us-east-1.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.us-east-1.amazonaws.com",
+ "https://elasticache.us-east-1.amazonaws.com",
+}
+
+var USWest = Region{
+ "us-west-1",
+ "https://ec2.us-west-1.amazonaws.com",
+ "https://s3-us-west-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.us-west-1.amazonaws.com",
+ "https://sns.us-west-1.amazonaws.com",
+ "https://sqs.us-west-1.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.us-west-1.amazonaws.com",
+ "https://dynamodb.us-west-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
+ "https://autoscaling.us-west-1.amazonaws.com",
+ ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
+ "",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.us-west-1.amazonaws.com",
+ "https://elasticache.us-west-1.amazonaws.com",
+}
+
+var USWest2 = Region{
+ "us-west-2",
+ "https://ec2.us-west-2.amazonaws.com",
+ "https://s3-us-west-2.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.us-west-2.amazonaws.com",
+ "https://sns.us-west-2.amazonaws.com",
+ "https://sqs.us-west-2.amazonaws.com",
+ "https://email.us-west-2.amazonaws.com",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.us-west-2.amazonaws.com",
+ "https://dynamodb.us-west-2.amazonaws.com",
+ ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
+ "https://autoscaling.us-west-2.amazonaws.com",
+ ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
+ "https://kinesis.us-west-2.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.us-west-2.amazonaws.com",
+ "https://elasticache.us-west-2.amazonaws.com",
+}
+
+var EUWest = Region{
+ "eu-west-1",
+ "https://ec2.eu-west-1.amazonaws.com",
+ "https://s3-eu-west-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.eu-west-1.amazonaws.com",
+ "https://sns.eu-west-1.amazonaws.com",
+ "https://sqs.eu-west-1.amazonaws.com",
+ "https://email.eu-west-1.amazonaws.com",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.eu-west-1.amazonaws.com",
+ "https://dynamodb.eu-west-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
+ "https://autoscaling.eu-west-1.amazonaws.com",
+ ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
+ "https://kinesis.eu-west-1.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.eu-west-1.amazonaws.com",
+ "https://elasticache.eu-west-1.amazonaws.com",
+}
+
+var EUCentral = Region{
+ "eu-central-1",
+ "https://ec2.eu-central-1.amazonaws.com",
+ "https://s3-eu-central-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.eu-central-1.amazonaws.com",
+ "https://sns.eu-central-1.amazonaws.com",
+ "https://sqs.eu-central-1.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.eu-central-1.amazonaws.com",
+ "https://dynamodb.eu-central-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
+ "https://autoscaling.eu-central-1.amazonaws.com",
+ ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
+ "https://kinesis.eu-central-1.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.eu-central-1.amazonaws.com",
+ "",
+}
+
+var APSoutheast = Region{
+ "ap-southeast-1",
+ "https://ec2.ap-southeast-1.amazonaws.com",
+ "https://s3-ap-southeast-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.ap-southeast-1.amazonaws.com",
+ "https://sns.ap-southeast-1.amazonaws.com",
+ "https://sqs.ap-southeast-1.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
+ "https://dynamodb.ap-southeast-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
+ "https://autoscaling.ap-southeast-1.amazonaws.com",
+ ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
+ "https://kinesis.ap-southeast-1.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.ap-southeast-1.amazonaws.com",
+ "https://elasticache.ap-southeast-1.amazonaws.com",
+}
+
+var APSoutheast2 = Region{
+ "ap-southeast-2",
+ "https://ec2.ap-southeast-2.amazonaws.com",
+ "https://s3-ap-southeast-2.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.ap-southeast-2.amazonaws.com",
+ "https://sns.ap-southeast-2.amazonaws.com",
+ "https://sqs.ap-southeast-2.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
+ "https://dynamodb.ap-southeast-2.amazonaws.com",
+ ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
+ "https://autoscaling.ap-southeast-2.amazonaws.com",
+ ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
+ "https://kinesis.ap-southeast-2.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.ap-southeast-2.amazonaws.com",
+ "https://elasticache.ap-southeast-2.amazonaws.com",
+}
+
+var APNortheast = Region{
+ "ap-northeast-1",
+ "https://ec2.ap-northeast-1.amazonaws.com",
+ "https://s3-ap-northeast-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.ap-northeast-1.amazonaws.com",
+ "https://sns.ap-northeast-1.amazonaws.com",
+ "https://sqs.ap-northeast-1.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
+ "https://dynamodb.ap-northeast-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
+ "https://autoscaling.ap-northeast-1.amazonaws.com",
+ ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
+ "https://kinesis.ap-northeast-1.amazonaws.com",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.ap-northeast-1.amazonaws.com",
+ "https://elasticache.ap-northeast-1.amazonaws.com",
+}
+
+var SAEast = Region{
+ "sa-east-1",
+ "https://ec2.sa-east-1.amazonaws.com",
+ "https://s3-sa-east-1.amazonaws.com",
+ "",
+ true,
+ true,
+ "https://sdb.sa-east-1.amazonaws.com",
+ "https://sns.sa-east-1.amazonaws.com",
+ "https://sqs.sa-east-1.amazonaws.com",
+ "",
+ "https://iam.amazonaws.com",
+ "https://elasticloadbalancing.sa-east-1.amazonaws.com",
+ "https://dynamodb.sa-east-1.amazonaws.com",
+ ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
+ "https://autoscaling.sa-east-1.amazonaws.com",
+ ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
+ "",
+ "https://sts.amazonaws.com",
+ "https://cloudformation.sa-east-1.amazonaws.com",
+ "https://elasticache.sa-east-1.amazonaws.com",
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry.go
new file mode 100644
index 00000000..bea964b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry.go
@@ -0,0 +1,136 @@
+package aws
+
+import (
+ "math/rand"
+ "net"
+ "net/http"
+ "time"
+)
+
+const (
+ maxDelay = 20 * time.Second
+ defaultScale = 300 * time.Millisecond
+ throttlingScale = 500 * time.Millisecond
+ throttlingScaleRange = throttlingScale / 4
+ defaultMaxRetries = 3
+ dynamoDBScale = 25 * time.Millisecond
+ dynamoDBMaxRetries = 10
+)
+
+// A RetryPolicy encapsulates a strategy for implementing client retries.
+//
+// Default implementations are provided which match the AWS SDKs.
+type RetryPolicy interface {
+ // ShouldRetry returns whether a client should retry a failed request.
+ ShouldRetry(target string, r *http.Response, err error, numRetries int) bool
+
+ // Delay returns the time a client should wait before issuing a retry.
+ Delay(target string, r *http.Response, err error, numRetries int) time.Duration
+}
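+
+// Usage sketch (illustrative; doRequest is a hypothetical helper): a client
+// consults the policy after each failed attempt.
+//
+//	var policy RetryPolicy = DefaultRetryPolicy{}
+//	for retries := 0; ; retries++ {
+//		resp, err := doRequest(req)
+//		if !policy.ShouldRetry("", resp, err, retries) {
+//			return resp, err
+//		}
+//		time.Sleep(policy.Delay("", resp, err, retries))
+//	}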
+
+// DefaultRetryPolicy implements the AWS SDK default retry policy.
+//
+// It will retry up to 3 times, and uses an exponential backoff with a scale
+// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of
+// throttling, the delay will also include some randomness.
+//
+// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90.
+type DefaultRetryPolicy struct {
+}
+
+// ShouldRetry implements the RetryPolicy ShouldRetry method.
+func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
+ return shouldRetry(r, err, numRetries, defaultMaxRetries)
+}
+
+// Delay implements the RetryPolicy Delay method.
+func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
+ scale := defaultScale
+ if err, ok := err.(*Error); ok && isThrottlingException(err) {
+ scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange)))
+ }
+ return exponentialBackoff(numRetries, scale)
+}
+
+// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy.
+//
+// It will retry up to 10 times, and uses an exponential backoff with a scale
+// factor of 25ms (25ms, 50ms, 100ms, ...).
+//
+// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103.
+type DynamoDBRetryPolicy struct {
+}
+
+// ShouldRetry implements the RetryPolicy ShouldRetry method.
+func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
+ return shouldRetry(r, err, numRetries, dynamoDBMaxRetries)
+}
+
+// Delay implements the RetryPolicy Delay method.
+func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
+ return exponentialBackoff(numRetries, dynamoDBScale)
+}
+
+// NeverRetryPolicy never retries requests and returns immediately on failure.
+type NeverRetryPolicy struct {
+}
+
+// ShouldRetry implements the RetryPolicy ShouldRetry method.
+func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
+ return false
+}
+
+// Delay implements the RetryPolicy Delay method.
+func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
+ return time.Duration(0)
+}
+
+// shouldRetry determines if we should retry the request.
+//
+// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html.
+func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool {
+ // Once we've exceeded the max retry attempts, game over.
+ if numRetries >= maxRetries {
+ return false
+ }
+
+ // Always retry temporary network errors.
+ if err, ok := err.(net.Error); ok && err.Temporary() {
+ return true
+ }
+
+ // Always retry 5xx responses.
+ if r != nil && r.StatusCode >= 500 {
+ return true
+ }
+
+ // Always retry throttling exceptions.
+ if err, ok := err.(ServiceError); ok && isThrottlingException(err) {
+ return true
+ }
+
+ // Other classes of failures indicate a problem with the request. Retrying
+ // won't help.
+ return false
+}
+
+func exponentialBackoff(numRetries int, scale time.Duration) time.Duration {
+ if numRetries < 0 {
+ return time.Duration(0)
+ }
+
+ delay := (1 << uint(numRetries)) * scale
+ if delay > maxDelay {
+ return maxDelay
+ }
+ return delay
+}
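+
+// For example, with the default 300ms scale the delays are 300ms, 600ms,
+// 1.2s, 2.4s, ..., doubling on each retry until the 20s maxDelay cap is
+// reached (from numRetries = 7 onward, since 128 * 300ms = 38.4s > 20s).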
+
+func isThrottlingException(err ServiceError) bool {
+ switch err.ErrorCode() {
+ case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry_test.go
new file mode 100644
index 00000000..c1f10be4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/retry_test.go
@@ -0,0 +1,303 @@
+package aws
+
+import (
+ "math/rand"
+ "net"
+ "net/http"
+ "testing"
+ "time"
+)
+
+type testInput struct {
+ res *http.Response
+ err error
+ numRetries int
+}
+
+type testResult struct {
+ shouldRetry bool
+ delay time.Duration
+}
+
+type testCase struct {
+ input testInput
+ defaultResult testResult
+ dynamoDBResult testResult
+}
+
+var testCases = []testCase{
+ // Test nil fields
+ testCase{
+ input: testInput{
+ err: nil,
+ res: nil,
+ numRetries: 0,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: 300 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: 25 * time.Millisecond,
+ },
+ },
+ // Test 3 different throttling exceptions
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "Throttling",
+ },
+ numRetries: 0,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 617165505 * time.Nanosecond, // account for randomness with known seed
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 25 * time.Millisecond,
+ },
+ },
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "ThrottlingException",
+ },
+ numRetries: 0,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 579393152 * time.Nanosecond, // account for randomness with known seed
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 25 * time.Millisecond,
+ },
+ },
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "ProvisionedThroughputExceededException",
+ },
+ numRetries: 1,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 1105991654 * time.Nanosecond, // account for randomness with known seed
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 50 * time.Millisecond,
+ },
+ },
+ // Test a fake throttling exception
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "MyMadeUpThrottlingCode",
+ },
+ numRetries: 0,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: 300 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: 25 * time.Millisecond,
+ },
+ },
+ // Test 5xx errors
+ testCase{
+ input: testInput{
+ res: &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ },
+ numRetries: 1,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 600 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 50 * time.Millisecond,
+ },
+ },
+ testCase{
+ input: testInput{
+ res: &http.Response{
+ StatusCode: http.StatusServiceUnavailable,
+ },
+ numRetries: 1,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 600 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 50 * time.Millisecond,
+ },
+ },
+ // Test a random 400 error
+ testCase{
+ input: testInput{
+ res: &http.Response{
+ StatusCode: http.StatusNotFound,
+ },
+ numRetries: 1,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: 600 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: 50 * time.Millisecond,
+ },
+ },
+ // Test a temporary net.Error
+ testCase{
+ input: testInput{
+ res: &http.Response{},
+ err: &net.DNSError{
+ IsTimeout: true,
+ },
+ numRetries: 2,
+ },
+ defaultResult: testResult{
+ shouldRetry: true,
+ delay: 1200 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 100 * time.Millisecond,
+ },
+ },
+ // Test a non-temporary net.Error
+ testCase{
+ input: testInput{
+ res: &http.Response{},
+ err: &net.DNSError{
+ IsTimeout: false,
+ },
+ numRetries: 3,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: 2400 * time.Millisecond,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: 200 * time.Millisecond,
+ },
+ },
+ // Assert failure after hitting max default retries
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "ProvisionedThroughputExceededException",
+ },
+ numRetries: defaultMaxRetries,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: 4313582352 * time.Nanosecond, // account for randomness with known seed
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: true,
+ delay: 200 * time.Millisecond,
+ },
+ },
+ // Assert failure after hitting max DynamoDB retries
+ testCase{
+ input: testInput{
+ err: &Error{
+ Code: "ProvisionedThroughputExceededException",
+ },
+ numRetries: dynamoDBMaxRetries,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: maxDelay,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: maxDelay,
+ },
+ },
+ // Assert we never go over the maxDelay value
+ testCase{
+ input: testInput{
+ numRetries: 25,
+ },
+ defaultResult: testResult{
+ shouldRetry: false,
+ delay: maxDelay,
+ },
+ dynamoDBResult: testResult{
+ shouldRetry: false,
+ delay: maxDelay,
+ },
+ },
+}
+
+func TestDefaultRetryPolicy(t *testing.T) {
+ rand.Seed(0)
+ var policy RetryPolicy
+ policy = &DefaultRetryPolicy{}
+ for _, test := range testCases {
+ res := test.input.res
+ err := test.input.err
+ numRetries := test.input.numRetries
+
+ shouldRetry := policy.ShouldRetry("", res, err, numRetries)
+ if shouldRetry != test.defaultResult.shouldRetry {
+ t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.defaultResult.shouldRetry, res, err, numRetries)
+ }
+ delay := policy.Delay("", res, err, numRetries)
+ if delay != test.defaultResult.delay {
+ t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.defaultResult.delay, res, err, numRetries)
+ }
+ }
+}
+
+func TestDynamoDBRetryPolicy(t *testing.T) {
+ var policy RetryPolicy
+ policy = &DynamoDBRetryPolicy{}
+ for _, test := range testCases {
+ res := test.input.res
+ err := test.input.err
+ numRetries := test.input.numRetries
+
+ shouldRetry := policy.ShouldRetry("", res, err, numRetries)
+ if shouldRetry != test.dynamoDBResult.shouldRetry {
+ t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, test.dynamoDBResult.shouldRetry, res, err, numRetries)
+ }
+ delay := policy.Delay("", res, err, numRetries)
+ if delay != test.dynamoDBResult.delay {
+ t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, test.dynamoDBResult.delay, res, err, numRetries)
+ }
+ }
+}
+
+func TestNeverRetryPolicy(t *testing.T) {
+ var policy RetryPolicy
+ policy = &NeverRetryPolicy{}
+ for _, test := range testCases {
+ res := test.input.res
+ err := test.input.err
+ numRetries := test.input.numRetries
+
+ shouldRetry := policy.ShouldRetry("", res, err, numRetries)
+ if shouldRetry {
+ t.Errorf("ShouldRetry returned %v, expected %v res=%#v err=%#v numRetries=%d", shouldRetry, false, res, err, numRetries)
+ }
+ delay := policy.Delay("", res, err, numRetries)
+ if delay != time.Duration(0) {
+ t.Errorf("Delay returned %v, expected %v res=%#v err=%#v numRetries=%d", delay, time.Duration(0), res, err, numRetries)
+ }
+ }
+}
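+
+// Illustrative sketch (not part of goamz): the delays asserted above are
+// consistent with a capped exponential backoff with jitter. A retry policy's
+// Delay could be computed roughly along these lines, where basePeriod and
+// maxDelay stand in for the policy's own constants and basePeriod must be
+// positive:
+func exponentialBackoffSketch(basePeriod time.Duration, numRetries int, maxDelay time.Duration) time.Duration {
+ // Double the base period once per retry already attempted, then add random
+ // jitter so concurrent clients do not retry in lockstep.
+ d := basePeriod * time.Duration(int64(1)<<uint(numRetries))
+ d += time.Duration(rand.Int63n(int64(basePeriod)))
+ if d > maxDelay {
+ return maxDelay
+ }
+ return d
+}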
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign.go
new file mode 100644
index 00000000..4aeb3c38
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign.go
@@ -0,0 +1,381 @@
+package aws
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+type V2Signer struct {
+ auth Auth
+ service ServiceInfo
+ host string
+}
+
+var b64 = base64.StdEncoding
+
+func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
+ u, err := url.Parse(service.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+ return &V2Signer{auth: auth, service: service, host: u.Host}, nil
+}
+
+func (s *V2Signer) Sign(method, path string, params map[string]string) {
+ params["AWSAccessKeyId"] = s.auth.AccessKey
+ params["SignatureVersion"] = "2"
+ params["SignatureMethod"] = "HmacSHA256"
+ if s.auth.Token() != "" {
+ params["SecurityToken"] = s.auth.Token()
+ }
+
+ // AWS specifies that the parameters in a signed request must
+ // be provided in the natural order of the keys. This is distinct
+ // from the natural order of the encoded value of key=value.
+ // Percent and equals signs affect the sorting order.
+ var keys, sarray []string
+ for k := range params {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
+ }
+ joined := strings.Join(sarray, "&")
+ payload := method + "\n" + s.host + "\n" + path + "\n" + joined
+ hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+ hash.Write([]byte(payload))
+ signature := make([]byte, b64.EncodedLen(hash.Size()))
+ b64.Encode(signature, hash.Sum(nil))
+
+ params["Signature"] = string(signature)
+}
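+
+// Hypothetical usage (illustrative only; the Action parameter and endpoint
+// are assumptions, not part of this file). Sign mutates the params map in
+// place:
+//
+//  params := map[string]string{"Action": "DescribeInstances"}
+//  signer, _ := NewV2Signer(auth, ServiceInfo{Endpoint: "https://ec2.us-east-1.amazonaws.com"})
+//  signer.Sign("GET", "/", params)
+//  // params now also carries AWSAccessKeyId, SignatureVersion,
+//  // SignatureMethod and Signature, ready to be query-encoded.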
+
+// Common date formats for signing requests
+const (
+ ISO8601BasicFormat = "20060102T150405Z"
+ ISO8601BasicFormatShort = "20060102"
+)
+
+type Route53Signer struct {
+ auth Auth
+}
+
+func NewRoute53Signer(auth Auth) *Route53Signer {
+ return &Route53Signer{auth: auth}
+}
+
+// getCurrentDate fetches the date stamp from the AWS servers to
+// ensure the auth headers are within 5 minutes of the server time
+func (s *Route53Signer) getCurrentDate() string {
+ response, err := http.Get("https://route53.amazonaws.com/date")
+ if err != nil {
+ fmt.Println("Unable to get date from Amazon:", err)
+ return ""
+ }
+
+ response.Body.Close()
+ return response.Header.Get("Date")
+}
+
+// Creates the authorization signature based on the date stamp and secret key
+func (s *Route53Signer) getHeaderAuthorize(message string) string {
+ hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+ hmacSha256.Write([]byte(message))
+ cryptedString := hmacSha256.Sum(nil)
+
+ return base64.StdEncoding.EncodeToString(cryptedString)
+}
+
+// Adds all the required headers for AWS Route53 API to the request
+// including the authorization
+func (s *Route53Signer) Sign(req *http.Request) {
+ date := s.getCurrentDate()
+ authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
+ s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))
+
+ req.Header.Set("Host", req.Host)
+ req.Header.Set("X-Amzn-Authorization", authHeader)
+ req.Header.Set("X-Amz-Date", date)
+ req.Header.Set("Content-Type", "application/xml")
+ if s.auth.Token() != "" {
+ req.Header.Set("X-Amzn-Security-Token", s.auth.Token())
+ }
+}
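+
+// Hypothetical usage (illustrative only; the hosted-zone path is an
+// assumption, not part of this file):
+//
+//  signer := NewRoute53Signer(auth)
+//  req, _ := http.NewRequest("GET", "https://route53.amazonaws.com/2013-04-01/hostedzone", nil)
+//  signer.Sign(req)
+//  resp, err := http.DefaultClient.Do(req)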
+
+/*
+The V4Signer encapsulates all of the functionality to sign a request with the AWS
+Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
+*/
+type V4Signer struct {
+ auth Auth
+ serviceName string
+ region Region
+ // Add the x-amz-content-sha256 header
+ IncludeXAmzContentSha256 bool
+}
+
+/*
+Return a new instance of a V4Signer capable of signing AWS requests.
+*/
+func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
+ return &V4Signer{
+ auth: auth,
+ serviceName: serviceName,
+ region: region,
+ IncludeXAmzContentSha256: false,
+ }
+}
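+
+// Illustrative note (not from this file): services such as S3 expect the
+// x-amz-content-sha256 header under Signature Version 4, so a caller might
+// opt in after construction:
+//
+//  signer := NewV4Signer(auth, "s3", USEast)
+//  signer.IncludeXAmzContentSha256 = true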
+
+/*
+Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
+
+The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
+or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
+the "host" header to be a signed header, therefore the Sign method will manually set a "host" header from
+the request.Host.
+
+The signed request will include a new "Authorization" header indicating that the request has been signed.
+
+Any changes to the request after signing the request will invalidate the signature.
+*/
+func (s *V4Signer) Sign(req *http.Request) {
+ req.Header.Set("host", req.Host) // host header must be included as a signed header
+ payloadHash := s.payloadHash(req)
+ if s.IncludeXAmzContentSha256 {
+ req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash
+ }
+ t := s.requestTime(req) // Get request time
+ creq := s.canonicalRequest(req, payloadHash) // Build canonical request
+ sts := s.stringToSign(t, creq) // Build string to sign
+ signature := s.signature(t, sts) // Calculate the AWS Signature Version 4
+ auth := s.authorization(req.Header, t, signature) // Create Authorization header value
+ req.Header.Set("Authorization", auth) // Add Authorization header to request
+}
+
+/*
+requestTime method will parse the time from the request "x-amz-date" or "date" headers.
+If the "x-amz-date" header is present, that will take priority over the "date" header.
+If neither header is defined or we are unable to parse either header as a valid date
+then we will create a new "x-amz-date" header with the current time.
+*/
+func (s *V4Signer) requestTime(req *http.Request) time.Time {
+
+ // Get "x-amz-date" header
+ date := req.Header.Get("x-amz-date")
+
+ // Attempt to parse as ISO8601BasicFormat
+ t, err := time.Parse(ISO8601BasicFormat, date)
+ if err == nil {
+ return t
+ }
+
+ // Attempt to parse as http.TimeFormat
+ t, err = time.Parse(http.TimeFormat, date)
+ if err == nil {
+ req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
+ return t
+ }
+
+ // Get "date" header
+ date = req.Header.Get("date")
+
+ // Attempt to parse as http.TimeFormat
+ t, err = time.Parse(http.TimeFormat, date)
+ if err == nil {
+ return t
+ }
+
+ // Create a current time header to be used
+ t = time.Now().UTC()
+ req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
+ return t
+}
+
+/*
+canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S)
+
+ CanonicalRequest =
+ HTTPRequestMethod + '\n' +
+ CanonicalURI + '\n' +
+ CanonicalQueryString + '\n' +
+ CanonicalHeaders + '\n' +
+ SignedHeaders + '\n' +
+ HexEncode(Hash(Payload))
+
+payloadHash is optional; use the empty string and it will be calculated from the request
+*/
+func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string {
+ if payloadHash == "" {
+ payloadHash = s.payloadHash(req)
+ }
+ c := new(bytes.Buffer)
+ fmt.Fprintf(c, "%s\n", req.Method)
+ fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
+ fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
+ fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
+ fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
+ fmt.Fprintf(c, "%s", payloadHash)
+ return c.String()
+}
+
+func (s *V4Signer) canonicalURI(u *url.URL) string {
+ u = &url.URL{Path: u.Path}
+ canonicalPath := u.String()
+
+ slash := strings.HasSuffix(canonicalPath, "/")
+ canonicalPath = path.Clean(canonicalPath)
+
+ if canonicalPath == "" || canonicalPath == "." {
+ canonicalPath = "/"
+ }
+
+ if canonicalPath != "/" && slash {
+ canonicalPath += "/"
+ }
+
+ return canonicalPath
+}
+
+func (s *V4Signer) canonicalQueryString(u *url.URL) string {
+ var a []string
+ for k, vs := range u.Query() {
+ k = url.QueryEscape(k)
+ for _, v := range vs {
+ if v == "" {
+ a = append(a, k+"=")
+ } else {
+ v = url.QueryEscape(v)
+ a = append(a, k+"="+v)
+ }
+ }
+ }
+ sort.Strings(a)
+ return strings.Join(a, "&")
+}
+
+func (s *V4Signer) canonicalHeaders(h http.Header) string {
+ i, a := 0, make([]string, len(h))
+ for k, v := range h {
+ for j, w := range v {
+ v[j] = strings.Trim(w, " ")
+ }
+ sort.Strings(v)
+ a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
+ i++
+ }
+ sort.Strings(a)
+ return strings.Join(a, "\n")
+}
+
+func (s *V4Signer) signedHeaders(h http.Header) string {
+ i, a := 0, make([]string, len(h))
+ for k := range h {
+ a[i] = strings.ToLower(k)
+ i++
+ }
+ sort.Strings(a)
+ return strings.Join(a, ";")
+}
+
+func (s *V4Signer) payloadHash(req *http.Request) string {
+ var b []byte
+ if req.Body == nil {
+ b = []byte("")
+ } else {
+ var err error
+ b, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ // TODO: surface this error to the caller instead of panicking.
+ panic(err)
+ }
+ }
+ req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+ return s.hash(string(b))
+}
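+
+// One possible non-panicking shape for payloadHash (an illustrative sketch,
+// not the goamz API): return the read error so Sign could surface it.
+//
+//  func (s *V4Signer) payloadHashErr(req *http.Request) (string, error) {
+//      if req.Body == nil {
+//          return s.hash(""), nil
+//      }
+//      b, err := ioutil.ReadAll(req.Body)
+//      if err != nil {
+//          return "", err
+//      }
+//      req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+//      return s.hash(string(b)), nil
+//  }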
+
+/*
+stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)
+
+ StringToSign =
+ Algorithm + '\n' +
+ RequestDate + '\n' +
+ CredentialScope + '\n' +
+ HexEncode(Hash(CanonicalRequest))
+*/
+func (s *V4Signer) stringToSign(t time.Time, creq string) string {
+ w := new(bytes.Buffer)
+ fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
+ fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
+ fmt.Fprintf(w, "%s\n", s.credentialScope(t))
+ fmt.Fprintf(w, "%s", s.hash(creq))
+ return w.String()
+}
+
+func (s *V4Signer) credentialScope(t time.Time) string {
+ return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
+}
+
+/*
+signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)
+
+ signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
+*/
+func (s *V4Signer) signature(t time.Time, sts string) string {
+ h := s.hmac(s.derivedKey(t), []byte(sts))
+ return fmt.Sprintf("%x", h)
+}
+
+/*
+derivedKey method derives a signing key to be used for signing a request.
+
+ kSecret = Your AWS Secret Access Key
+ kDate = HMAC("AWS4" + kSecret, Date)
+ kRegion = HMAC(kDate, Region)
+ kService = HMAC(kRegion, Service)
+ kSigning = HMAC(kService, "aws4_request")
+*/
+func (s *V4Signer) derivedKey(t time.Time) []byte {
+ h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort)))
+ h = s.hmac(h, []byte(s.region.Name))
+ h = s.hmac(h, []byte(s.serviceName))
+ h = s.hmac(h, []byte("aws4_request"))
+ return h
+}
+
+/*
+authorization method generates the authorization header value.
+*/
+func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string {
+ w := new(bytes.Buffer)
+ fmt.Fprint(w, "AWS4-HMAC-SHA256 ")
+ fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t))
+ fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header))
+ fmt.Fprintf(w, "Signature=%s", signature)
+ return w.String()
+}
+
+// hash method calculates the sha256 hash for a given string
+func (s *V4Signer) hash(in string) string {
+ h := sha256.New()
+ fmt.Fprintf(h, "%s", in)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+// hmac method calculates the sha256 hmac for a given slice of bytes
+func (s *V4Signer) hmac(key, data []byte) []byte {
+ h := hmac.New(sha256.New, key)
+ h.Write(data)
+ return h.Sum(nil)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign_test.go
new file mode 100644
index 00000000..d172bdb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/aws/sign_test.go
@@ -0,0 +1,569 @@
+package aws_test
+
+import (
+ "fmt"
+ "github.com/crowdmob/goamz/aws"
+ "gopkg.in/check.v1"
+ "net/http"
+ "strings"
+ "time"
+)
+
+var _ = check.Suite(&V4SignerSuite{})
+
+type V4SignerSuite struct {
+ auth aws.Auth
+ region aws.Region
+ cases []V4SignerSuiteCase
+}
+
+type V4SignerSuiteCase struct {
+ label string
+ request V4SignerSuiteCaseRequest
+ canonicalRequest string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+type V4SignerSuiteCaseRequest struct {
+ method string
+ host string
+ url string
+ headers []string
+ body string
+}
+
+func (s *V4SignerSuite) SetUpSuite(c *check.C) {
+ s.auth = aws.Auth{AccessKey: "AKIDEXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"}
+ s.region = aws.USEast
+
+ // Test cases from the Signature Version 4 Test Suite (http://goo.gl/nguvs0)
+ s.cases = append(s.cases,
+
+ // get-header-key-duplicate
+ V4SignerSuiteCase{
+ label: "get-header-key-duplicate",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar", "zoo:foobar", "zoo:zoobar"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:foobar,zoobar,zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3c52f0eaae2b61329c0a332e3fa15842a37bc5812cf4d80eb64784308850e313",
+ signature: "54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=54afcaaf45b331f81cd2edb974f7b824ff4dd594cbbaa945ed636b48477368ed",
+ },
+
+ // get-header-value-order
+ V4SignerSuiteCase{
+ label: "get-header-value-order",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p:z", "p:a", "p:p", "p:a"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:a,a,p,z\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n94c0389fefe0988cbbedc8606f0ca0b485b48da010d09fc844b45b697c8924fe",
+ signature: "d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=d2973954263943b11624a11d1c963ca81fb274169c7868b2858c04f083199e3d",
+ },
+
+ // get-header-value-trim
+ V4SignerSuiteCase{
+ label: "get-header-value-trim",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "p: phfft "},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\np:phfft\n\ndate;host;p\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndddd1902add08da1ac94782b05f9278c08dc7468db178a84f8950d93b30b1f35",
+ signature: "debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;p, Signature=debf546796015d6f6ded8626f5ce98597c33b47b9164cf6b17b4642036fcb592",
+ },
+
+ // get-empty
+ V4SignerSuiteCase{
+ label: "get-empty",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-single-relative
+ V4SignerSuiteCase{
+ label: "get-single-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/.",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-multiple-relative
+ V4SignerSuiteCase{
+ label: "get-multiple-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/./././",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-relative-relative
+ V4SignerSuiteCase{
+ label: "get-relative-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/foo/bar/../..",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-relative
+ V4SignerSuiteCase{
+ label: "get-relative",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/foo/..",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slash-dot-slash
+ V4SignerSuiteCase{
+ label: "get-slash-dot-slash",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/./",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slash-pointless-dot
+ V4SignerSuiteCase{
+ label: "get-slash-pointless-dot",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/./foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n8021a97572ee460f87ca67f4e8c0db763216d84715f5424a843a5312a3321e2d",
+ signature: "910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a",
+ },
+
+ // get-slash
+ V4SignerSuiteCase{
+ label: "get-slash",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "//",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-slashes
+ V4SignerSuiteCase{
+ label: "get-slashes",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "//foo//",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/foo/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n6bb4476ee8745730c9cb79f33a0c70baa6d8af29c0077fa12e4e8f1dd17e7098",
+ signature: "b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19",
+ },
+
+ // get-space
+ V4SignerSuiteCase{
+ label: "get-space",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/%20/foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/%20/foo\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n69c45fb9fe3fd76442b5086e50b2e9fec8298358da957b293ef26e506fdfb54b",
+ signature: "f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f309cfbd10197a230c42dd17dbf5cca8a0722564cb40a872d25623cfa758e374",
+ },
+
+ // get-unreserved
+ V4SignerSuiteCase{
+ label: "get-unreserved",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ndf63ee3247c0356c696a3b21f8d8490b01fa9cd5bc6550ef5ef5f4636b7b8901",
+ signature: "830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e",
+ },
+
+ // get-utf8
+ V4SignerSuiteCase{
+ label: "get-utf8",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/%E1%88%B4",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/%E1%88%B4\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n27ba31df5dbc6e063d8f87d62eb07143f7f271c5330a917840586ac1c85b6f6b",
+ signature: "8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=8d6634c189aa8c75c2e51e106b6b5121bed103fdb351f7d7d4381c738823af74",
+ },
+
+ // get-vanilla-empty-query-key
+ V4SignerSuiteCase{
+ label: "get-vanilla-empty-query-key",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n0846c2945b0832deb7a463c66af5c4f8bd54ec28c438e67a214445b157c9ddf8",
+ signature: "56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=56c054473fd260c13e4e7393eb203662195f5d4a1fada5314b8b52b23f985e9f",
+ },
+
+ // get-vanilla-query-order-key-case
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-key-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=Zoo&foo=aha",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=Zoo&foo=aha\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ne25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d",
+ signature: "be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09",
+ },
+
+ // get-vanilla-query-order-key
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-key",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?a=foo&b=foo",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\na=foo&b=foo\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n2f23d14fe13caebf6dfda346285c6d9c14f49eaca8f5ec55c627dd7404f7a727",
+ signature: "0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b",
+ },
+
+ // get-vanilla-query-order-value
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-order-value",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?foo=b&foo=a",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\nfoo=a&foo=b\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n33dffc220e89131f8f6157a35c40903daa658608d9129ff9489e5cf5bbd9b11b",
+ signature: "feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=feb926e49e382bec75c9d7dcb2a1b6dc8aa50ca43c25d2bc51143768c0875acc",
+ },
+
+ // get-vanilla-query-unreserved
+ V4SignerSuiteCase{
+ label: "get-vanilla-query-unreserved",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nd2578f3156d4c9d180713d1ff20601d8a3eed0dd35447d24603d7d67414bd6b5",
+ signature: "f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=f1498ddb4d6dae767d97c466fb92f1b59a2c71ca29ac954692663f9db03426fb",
+ },
+
+ // get-vanilla-query
+ V4SignerSuiteCase{
+ label: "get-vanilla-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // get-vanilla-ut8-query
+ V4SignerSuiteCase{
+ label: "get-vanilla-ut8-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/?ሴ=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n%E1%88%B4=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nde5065ff39c131e6c2e2bd19cd9345a794bf3b561eab20b8d97b2093fc2a979e",
+ signature: "6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=6fb359e9a05394cc7074e0feb42573a2601abc0c869a953e8c5c12e4e01f1a8c",
+ },
+
+ // get-vanilla
+ V4SignerSuiteCase{
+ label: "get-vanilla",
+ request: V4SignerSuiteCaseRequest{
+ method: "GET",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "GET\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n366b91fb121d72a00f46bbe8d395f53a102b06dfb7e79636515208ed3fa606b1",
+ signature: "b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470",
+ },
+
+ // post-header-key-case
+ V4SignerSuiteCase{
+ label: "post-header-key-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
+ signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ },
+
+ // post-header-key-sort
+ V4SignerSuiteCase{
+ label: "post-header-key-sort",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "ZOO:zoobar"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:zoobar\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n34e1bddeb99e76ee01d63b5e28656111e210529efeec6cdfd46a48e4c734545d",
+ signature: "b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=b7a95a52518abbca0964a999a880429ab734f35ebbf1235bd79a5de87756dc4a",
+ },
+
+ // post-header-value-case
+ V4SignerSuiteCase{
+ label: "post-header-value-case",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"DATE:Mon, 09 Sep 2011 23:36:00 GMT", "zoo:ZOOBAR"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\nzoo:ZOOBAR\n\ndate;host;zoo\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n3aae6d8274b8c03e2cc96fc7d6bda4b9bd7a0a184309344470b2c96953e124aa",
+ signature: "273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host;zoo, Signature=273313af9d0c265c531e11db70bbd653f3ba074c1009239e8559d3987039cad7",
+ },
+
+ // post-vanilla-empty-query-value
+ V4SignerSuiteCase{
+ label: "post-vanilla-empty-query-value",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
+ signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ },
+
+ // post-vanilla-query
+ V4SignerSuiteCase{
+ label: "post-vanilla-query",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/?foo=bar",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\nfoo=bar\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\ncd4f39132d8e60bb388831d734230460872b564871c47f5de62e62d1a68dbe1e",
+ signature: "b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b6e3b79003ce0743a491606ba1035a804593b0efb1e20a11cba83f8c25a57a92",
+ },
+
+ // post-vanilla
+ V4SignerSuiteCase{
+ label: "post-vanilla",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ },
+ canonicalRequest: "POST\n/\n\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ndate;host\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n05da62cee468d24ae84faff3c39f1b85540de60243c1bcaace39c0a2acc7b2c4",
+ signature: "22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726",
+ },
+
+ // post-x-www-form-urlencoded-parameters
+ V4SignerSuiteCase{
+ label: "post-x-www-form-urlencoded-parameters",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Content-Type:application/x-www-form-urlencoded; charset=utf8", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ body: "foo=bar",
+ },
+ canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf8\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\nc4115f9e54b5cecf192b1eaa23b8e88ed8dc5391bd4fde7b3fff3d9c9fe0af1f",
+ signature: "b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71",
+ },
+
+ // post-x-www-form-urlencoded
+ V4SignerSuiteCase{
+ label: "post-x-www-form-urlencoded",
+ request: V4SignerSuiteCaseRequest{
+ method: "POST",
+ host: "host.foo.com",
+ url: "/",
+ headers: []string{"Content-Type:application/x-www-form-urlencoded", "Date:Mon, 09 Sep 2011 23:36:00 GMT"},
+ body: "foo=bar",
+ },
+ canonicalRequest: "POST\n/\n\ncontent-type:application/x-www-form-urlencoded\ndate:Mon, 09 Sep 2011 23:36:00 GMT\nhost:host.foo.com\n\ncontent-type;date;host\n3ba8907e7a252327488df390ed517c45b96dead033600219bdca7107d1d3f88a",
+ stringToSign: "AWS4-HMAC-SHA256\n20110909T233600Z\n20110909/us-east-1/host/aws4_request\n4c5c6e4b52fb5fb947a8733982a8a5a61b14f04345cbfe6e739236c76dd48f74",
+ signature: "5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ authorization: "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=5a15b22cf462f047318703b92e6f4f38884e4a7ab7b1d6426ca46a8bd1c26cbc",
+ },
+ )
+}
+
+func (s *V4SignerSuite) TestCases(c *check.C) {
+ signer := aws.NewV4Signer(s.auth, "host", s.region)
+
+ for _, testCase := range s.cases {
+
+ req, err := http.NewRequest(testCase.request.method, "http://"+testCase.request.host+testCase.request.url, strings.NewReader(testCase.request.body))
+ c.Assert(err, check.IsNil, check.Commentf("Testcase: %s", testCase.label))
+ for _, v := range testCase.request.headers {
+ h := strings.SplitN(v, ":", 2)
+ req.Header.Add(h[0], h[1])
+ }
+ req.Header.Set("host", req.Host)
+
+ t := signer.RequestTime(req)
+
+ canonicalRequest := signer.CanonicalRequest(req)
+ c.Check(canonicalRequest, check.Equals, testCase.canonicalRequest, check.Commentf("Testcase: %s", testCase.label))
+
+ stringToSign := signer.StringToSign(t, canonicalRequest)
+ c.Check(stringToSign, check.Equals, testCase.stringToSign, check.Commentf("Testcase: %s", testCase.label))
+
+ signature := signer.Signature(t, stringToSign)
+ c.Check(signature, check.Equals, testCase.signature, check.Commentf("Testcase: %s", testCase.label))
+
+ authorization := signer.Authorization(req.Header, t, signature)
+ c.Check(authorization, check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label))
+
+ signer.Sign(req)
+ c.Check(req.Header.Get("Authorization"), check.Equals, testCase.authorization, check.Commentf("Testcase: %s", testCase.label))
+ }
+}
+
+func ExampleV4Signer() {
+ // Get auth from env vars
+ auth, err := aws.EnvAuth()
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ // Create a signer with the auth, name of the service, and aws region
+ signer := aws.NewV4Signer(auth, "dynamodb", aws.USEast)
+
+ // Create a request
+ req, err := http.NewRequest("POST", aws.USEast.DynamoDBEndpoint, strings.NewReader("sample_request"))
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ // Date or x-amz-date header is required to sign a request
+ req.Header.Add("Date", time.Now().UTC().Format(http.TimeFormat))
+
+ // Sign the request
+ signer.Sign(req)
+
+ // Issue signed request
+ http.DefaultClient.Do(req)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront.go
new file mode 100644
index 00000000..c13bbeec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront.go
@@ -0,0 +1,143 @@
+package cloudfront
+
+import (
+ "crypto"
+ "crypto/rsa"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "github.com/crowdmob/goamz/aws"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type CloudFront struct {
+ BaseURL string
+ keyPairId string
+ key *rsa.PrivateKey
+}
+
+var base64Replacer = strings.NewReplacer("=", "_", "+", "-", "/", "~")
+
+func NewKeyLess(auth aws.Auth, baseurl string) *CloudFront {
+ return &CloudFront{keyPairId: auth.AccessKey, BaseURL: baseurl}
+}
+
+func New(baseurl string, key *rsa.PrivateKey, keyPairId string) *CloudFront {
+ return &CloudFront{
+ BaseURL: baseurl,
+ keyPairId: keyPairId,
+ key: key,
+ }
+}
+
+type epochTime struct {
+ EpochTime int64 `json:"AWS:EpochTime"`
+}
+
+type condition struct {
+ DateLessThan epochTime
+}
+
+type statement struct {
+ Resource string
+ Condition condition
+}
+
+type policy struct {
+ Statement []statement
+}
+
+func buildPolicy(resource string, expireTime time.Time) ([]byte, error) {
+ p := &policy{
+ Statement: []statement{
+ statement{
+ Resource: resource,
+ Condition: condition{
+ DateLessThan: epochTime{
+ EpochTime: expireTime.Truncate(time.Millisecond).Unix(),
+ },
+ },
+ },
+ },
+ }
+
+ return json.Marshal(p)
+}
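+
+// For reference, buildPolicy marshals to JSON of roughly this shape (the
+// resource URL and epoch value below are placeholders; the actual output is
+// a single compact line):
+//
+//  {"Statement":[{"Resource":"https://example.cloudfront.net/path",
+//    "Condition":{"DateLessThan":{"AWS:EpochTime":1396015221}}}]}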
+
+func (cf *CloudFront) generateSignature(policy []byte) (string, error) {
+ hash := sha1.New()
+ _, err := hash.Write(policy)
+ if err != nil {
+ return "", err
+ }
+
+ hashed := hash.Sum(nil)
+ var signed []byte
+ if cf.key.Validate() == nil {
+ signed, err = rsa.SignPKCS1v15(nil, cf.key, crypto.SHA1, hashed)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ signed = hashed
+ }
+ encoded := base64Replacer.Replace(base64.StdEncoding.EncodeToString(signed))
+
+ return encoded, nil
+}
+
+// Creates a signed URL using RSA with SHA1 as specified by
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature
+func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) {
+ resource := cf.BaseURL + path
+ if queryString != "" {
+ resource = cf.BaseURL + path + "?" + queryString
+ }
+
+ policy, err := buildPolicy(resource, expires)
+ if err != nil {
+ return "", err
+ }
+
+ signature, err := cf.generateSignature(policy)
+ if err != nil {
+ return "", err
+ }
+
+ // TODO: do this once (e.g. parse BaseURL at construction time)
+ uri, err := url.Parse(cf.BaseURL)
+ if err != nil {
+ return "", err
+ }
+
+ uri.RawQuery = queryString
+ if queryString != "" {
+ uri.RawQuery += "&"
+ }
+
+ expireTime := expires.Truncate(time.Millisecond).Unix()
+
+ uri.Path = path
+ uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId)
+
+ return uri.String(), nil
+}
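+
+// Hypothetical usage (mirrors cloudfront_test.go; the base URL, key-pair ID
+// and object path are placeholders):
+//
+//  block, _ := pem.Decode(rawPEMBytes)                      // encoding/pem
+//  privateKey, _ := x509.ParsePKCS1PrivateKey(block.Bytes)  // crypto/x509
+//  cf := New("https://d111111abcdef8.cloudfront.net", privateKey, "APKAEXAMPLE")
+//  signedURL, err := cf.CannedSignedURL("/video.mp4", "", time.Now().Add(time.Hour))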
+
+// SignedURL creates a signed URL from a canned policy. Note that, unlike
+// CannedSignedURL, this variant only base64-encodes the SHA1 hash of the
+// policy; it does not RSA-sign it.
+func (cloudfront *CloudFront) SignedURL(path, querystrings string, expires time.Time) string {
+ policy := `{"Statement":[{"Resource":"` + path + "?" + querystrings + `","Condition":{"DateLessThan":{"AWS:EpochTime":` + strconv.FormatInt(expires.Truncate(time.Millisecond).Unix(), 10) + `}}}]}`
+
+ hash := sha1.New()
+ hash.Write([]byte(policy))
+ b := hash.Sum(nil)
+ he := base64.StdEncoding.EncodeToString(b)
+
+ policySha1 := he
+
+ url := cloudfront.BaseURL + path + "?" + querystrings + "&Expires=" + strconv.FormatInt(expires.Unix(), 10) + "&Signature=" + policySha1 + "&Key-Pair-Id=" + cloudfront.keyPairId
+
+ return url
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront_test.go
new file mode 100644
index 00000000..63744d1c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/cloudfront_test.go
@@ -0,0 +1,52 @@
+package cloudfront
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "io/ioutil"
+ "net/url"
+ "testing"
+ "time"
+)
+
+func TestSignedCannedURL(t *testing.T) {
+ rawKey, err := ioutil.ReadFile("testdata/key.pem")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pemKey, _ := pem.Decode(rawKey)
+ privateKey, err := x509.ParsePKCS1PrivateKey(pemKey.Bytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cf := &CloudFront{
+ key: privateKey,
+ keyPairId: "test-key-pair-1231245",
+ BaseURL: "https://cloudfront.com",
+ }
+
+ expireTime, err := time.Parse(time.RFC3339, "2014-03-28T14:00:21Z")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+
+ uri, err := cf.CannedSignedURL("test", "test=value", expireTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parsed, err := url.Parse(uri)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ signature := parsed.Query().Get("Signature")
+ if signature == "" {
+ t.Fatal("Encoded signature is empty")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pem b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pem
new file mode 100644
index 00000000..96e820a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQC0yMzp9DkPAE99DhsEaGkqougLvtmDKri4bZj0fFjmGmjyyjz9
+hlrsr87LHVWzH/7igK7040HG1UqypX3ijtJa9+6BKHwBBctboU3y4GfwFwVAOumY
+9UytFpyPlgUFrffZLQAywKkT24OgcfEj0G5kiQn760wFnmSUtOuITo708QIDAQAB
+AoGAJUA6+PoZx72Io3wElSPuh5qJteHdb+mdpmLu4XG936wRc/W4G4VTtvGC6tdg
+kUhGfOWHJ26sXwwUGDuBdO146m0DkBTuIooy97afpL6hXgL5v4ELHbbuFJcf4Geg
+/UAuexvRT1HenYFQ/iXM0LlqI33i8cFRc1A+j0Gseo07gAECQQDYFCn7OUokX+Q8
+M2Cwhu7JT1obmP2HwsBtXl0CDDxtOQkuYJP/UqvtdYPz/kRn3yQjoynaCTHYrFz/
+H8oN1nNhAkEA1i9TEpo7RbanIyT4vbc1/5xfjE7Pj0lnGku0QXFp/S+8YxbqhjrQ
+4Qp7TTXIPPqvQhhEpAGGspM460K3F6h7kQJBANJCbMeFa9wRY2ohJIkiA+HoUWph
+aPNeUxkZpa+EcJhn08NJPzpIG/ypSYl3duEMhYIYF3WPVO3ea2/mYxsr/oECQFj5
+td/fdEoEk7AU1sQxDNyPwF2QC8dxbcRNuKcLD0Wfg/oB9hEm88jYytoLQpCabx3c
+6P7cp3EdmaKZx2erlRECQDYTSK2tS0+VoXSV9JbU08Pbu53j3Zhmp4l0csP+l7EU
+U+rRQzKho4X9vpR/VpRGXbw8tTIhojNpHh5ofryVfgk=
+-----END RSA PRIVATE KEY-----
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pub b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pub
new file mode 100644
index 00000000..7d0b5b4d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/cloudfront/testdata/key.pub
@@ -0,0 +1,6 @@
+-----BEGIN PUBLIC KEY-----
+MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC0yMzp9DkPAE99DhsEaGkqougL
+vtmDKri4bZj0fFjmGmjyyjz9hlrsr87LHVWzH/7igK7040HG1UqypX3ijtJa9+6B
+KHwBBctboU3y4GfwFwVAOumY9UytFpyPlgUFrffZLQAywKkT24OgcfEj0G5kiQn7
+60wFnmSUtOuITo708QIDAQAB
+-----END PUBLIC KEY-----
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/export_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/export_test.go
new file mode 100644
index 00000000..a4130791
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/export_test.go
@@ -0,0 +1,27 @@
+package s3
+
+import (
+ "github.com/crowdmob/goamz/aws"
+)
+
+var originalStrategy = attempts
+
+func SetAttemptStrategy(s *aws.AttemptStrategy) {
+ if s == nil {
+ attempts = originalStrategy
+ } else {
+ attempts = *s
+ }
+}
+
+func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
+ sign(auth, method, path, params, headers)
+}
+
+func SetListPartsMax(n int) {
+ listPartsMax = n
+}
+
+func SetListMultiMax(n int) {
+ listMultiMax = n
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle.go
new file mode 100644
index 00000000..d9281261
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle.go
@@ -0,0 +1,202 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/xml"
+ "net/url"
+ "strconv"
+ "time"
+)
+
+// Implements an interface for s3 bucket lifecycle configuration
+// See goo.gl/d0bbDf for details.
+
+const (
+ LifecycleRuleStatusEnabled = "Enabled"
+ LifecycleRuleStatusDisabled = "Disabled"
+ LifecycleRuleDateFormat = "2006-01-02"
+ StorageClassGlacier = "GLACIER"
+)
+
+type Expiration struct {
+ Days *uint `xml:"Days,omitempty"`
+ Date string `xml:"Date,omitempty"`
+}
+
+// Returns Date as a time.Time.
+func (r *Expiration) ParseDate() (time.Time, error) {
+ return time.Parse(LifecycleRuleDateFormat, r.Date)
+}
+
+type Transition struct {
+ Days *uint `xml:"Days,omitempty"`
+ Date string `xml:"Date,omitempty"`
+ StorageClass string `xml:"StorageClass"`
+}
+
+// Returns Date as a time.Time.
+func (r *Transition) ParseDate() (time.Time, error) {
+ return time.Parse(LifecycleRuleDateFormat, r.Date)
+}
+
+type NoncurrentVersionExpiration struct {
+ Days *uint `xml:"NoncurrentDays,omitempty"`
+}
+
+type NoncurrentVersionTransition struct {
+ Days *uint `xml:"NoncurrentDays,omitempty"`
+ StorageClass string `xml:"StorageClass"`
+}
+
+type LifecycleRule struct {
+ ID string `xml:"ID"`
+ Prefix string `xml:"Prefix"`
+ Status string `xml:"Status"`
+ NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
+ Transition *Transition `xml:"Transition,omitempty"`
+ Expiration *Expiration `xml:"Expiration,omitempty"`
+}
+
+// Create a lifecycle rule with an arbitrary identifier id and an object name
+// prefix to which the rule should apply.
+func NewLifecycleRule(id, prefix string) *LifecycleRule {
+ rule := &LifecycleRule{
+ ID: id,
+ Prefix: prefix,
+ Status: LifecycleRuleStatusEnabled,
+ }
+ return rule
+}
+
+// Adds a transition rule in days. Overwrites any previous transition rule.
+func (r *LifecycleRule) SetTransitionDays(days uint) {
+ r.Transition = &Transition{
+ Days: &days,
+ StorageClass: StorageClassGlacier,
+ }
+}
+
+// Adds a transition rule as a date. Overwrites any previous transition rule.
+func (r *LifecycleRule) SetTransitionDate(date time.Time) {
+ r.Transition = &Transition{
+ Date: date.Format(LifecycleRuleDateFormat),
+ StorageClass: StorageClassGlacier,
+ }
+}
+
+// Adds an expiration rule in days. Overwrites any previous expiration rule.
+// Days must be > 0.
+func (r *LifecycleRule) SetExpirationDays(days uint) {
+ r.Expiration = &Expiration{
+ Days: &days,
+ }
+}
+
+// Adds an expiration rule as a date. Overwrites any previous expiration rule.
+func (r *LifecycleRule) SetExpirationDate(date time.Time) {
+ r.Expiration = &Expiration{
+ Date: date.Format(LifecycleRuleDateFormat),
+ }
+}
+
+// Adds a noncurrent version transition rule. Overwrites any previous
+// noncurrent version transition rule.
+func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) {
+ r.NoncurrentVersionTransition = &NoncurrentVersionTransition{
+ Days: &days,
+ StorageClass: StorageClassGlacier,
+ }
+}
+
+// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites
+// any previous noncurrent version expiration rule.
+func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) {
+ r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{
+ Days: &days,
+ }
+}
+
+// Marks the rule as disabled.
+func (r *LifecycleRule) Disable() {
+ r.Status = LifecycleRuleStatusDisabled
+}
+
+// Marks the rule as enabled (default).
+func (r *LifecycleRule) Enable() {
+ r.Status = LifecycleRuleStatusEnabled
+}
+
+type LifecycleConfiguration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration"`
+ Rules *[]*LifecycleRule `xml:"Rule,omitempty"`
+}
+
+// Adds a LifecycleRule to the configuration.
+func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) {
+ var rules []*LifecycleRule
+ if c.Rules != nil {
+ rules = *c.Rules
+ }
+ rules = append(rules, r)
+ c.Rules = &rules
+}
+
+// Sets the bucket's lifecycle configuration.
+func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error {
+ doc, err := xml.Marshal(c)
+ if err != nil {
+ return err
+ }
+
+ buf := makeXmlBuffer(doc)
+ digest := md5.New()
+ size, err := digest.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(int64(size), 10)},
+ "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
+ }
+
+ req := &request{
+ path: "/",
+ method: "PUT",
+ bucket: b.Name,
+ headers: headers,
+ payload: buf,
+ params: url.Values{"lifecycle": {""}},
+ }
+
+ return b.S3.queryV4Sign(req, nil)
+}
+
+// Retrieves the lifecycle configuration for the bucket. AWS returns an error
+// if no lifecycle configuration has been set.
+func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) {
+ req := &request{
+ method: "GET",
+ bucket: b.Name,
+ path: "/",
+ params: url.Values{"lifecycle": {""}},
+ }
+
+ conf := &LifecycleConfiguration{}
+ err := b.S3.queryV4Sign(req, conf)
+ return conf, err
+}
+
+// Delete the bucket's lifecycle configuration.
+func (b *Bucket) DeleteLifecycleConfiguration() error {
+ req := &request{
+ method: "DELETE",
+ bucket: b.Name,
+ path: "/",
+ params: url.Values{"lifecycle": {""}},
+ }
+
+ return b.S3.queryV4Sign(req, nil)
+}
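+
+// Hypothetical usage (mirrors lifecycle_test.go; the rule ID, prefix, day
+// counts and bucket are placeholders):
+//
+//  conf := &LifecycleConfiguration{}
+//  rule := NewLifecycleRule("archive-logs", "logs/")
+//  rule.SetTransitionDays(30)  // move matching objects to Glacier after 30 days
+//  rule.SetExpirationDays(365) // delete them after a year
+//  conf.AddRule(rule)
+//  err := bucket.PutLifecycleConfiguration(conf)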
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle_test.go
new file mode 100644
index 00000000..04e143fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/lifecycle_test.go
@@ -0,0 +1,205 @@
+package s3_test
+
+import (
+ "encoding/xml"
+ "github.com/crowdmob/goamz/s3"
+ "gopkg.in/check.v1"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+)
+
+func (s *S) TestLifecycleConfiguration(c *check.C) {
+ date, err := time.Parse(s3.LifecycleRuleDateFormat, "2014-09-10")
+ c.Check(err, check.IsNil)
+
+ conf := &s3.LifecycleConfiguration{}
+
+ rule := s3.NewLifecycleRule("transition-days", "/")
+ rule.SetTransitionDays(7)
+ conf.AddRule(rule)
+
+ rule = s3.NewLifecycleRule("transition-date", "/")
+ rule.SetTransitionDate(date)
+ conf.AddRule(rule)
+
+ rule = s3.NewLifecycleRule("expiration-days", "")
+ rule.SetExpirationDays(1)
+ conf.AddRule(rule)
+
+ rule = s3.NewLifecycleRule("expiration-date", "")
+ rule.SetExpirationDate(date)
+ conf.AddRule(rule)
+
+ rule = s3.NewLifecycleRule("noncurrent-transition", "")
+ rule.SetNoncurrentVersionTransitionDays(11)
+ conf.AddRule(rule)
+
+ rule = s3.NewLifecycleRule("noncurrent-expiration", "")
+ rule.SetNoncurrentVersionExpirationDays(1011)
+
+ // Test Disable() and Enable() toggling
+ c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled)
+ rule.Disable()
+ c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled)
+ rule.Enable()
+ c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusEnabled)
+ rule.Disable()
+ c.Check(rule.Status, check.Equals, s3.LifecycleRuleStatusDisabled)
+
+ conf.AddRule(rule)
+
+ doc, err := xml.MarshalIndent(conf, "", " ")
+ c.Check(err, check.IsNil)
+
+ expectedDoc := `<LifecycleConfiguration>
+ <Rule>
+  <ID>transition-days</ID>
+  <Prefix>/</Prefix>
+  <Status>Enabled</Status>
+  <Transition>
+   <Days>7</Days>
+   <StorageClass>GLACIER</StorageClass>
+  </Transition>
+ </Rule>
+ <Rule>
+  <ID>transition-date</ID>
+  <Prefix>/</Prefix>
+  <Status>Enabled</Status>
+  <Transition>
+   <Date>2014-09-10</Date>
+   <StorageClass>GLACIER</StorageClass>
+  </Transition>
+ </Rule>
+ <Rule>
+  <ID>expiration-days</ID>
+  <Prefix></Prefix>
+  <Status>Enabled</Status>
+  <Expiration>
+   <Days>1</Days>
+  </Expiration>
+ </Rule>
+ <Rule>
+  <ID>expiration-date</ID>
+  <Prefix></Prefix>
+  <Status>Enabled</Status>
+  <Expiration>
+   <Date>2014-09-10</Date>
+  </Expiration>
+ </Rule>
+ <Rule>
+  <ID>noncurrent-transition</ID>
+  <Prefix></Prefix>
+  <Status>Enabled</Status>
+  <NoncurrentVersionTransition>
+   <NoncurrentDays>11</NoncurrentDays>
+   <StorageClass>GLACIER</StorageClass>
+  </NoncurrentVersionTransition>
+ </Rule>
+ <Rule>
+  <ID>noncurrent-expiration</ID>
+  <Prefix></Prefix>
+  <Status>Disabled</Status>
+  <NoncurrentVersionExpiration>
+   <NoncurrentDays>1011</NoncurrentDays>
+  </NoncurrentVersionExpiration>
+ </Rule>
+</LifecycleConfiguration>`
+
+ c.Check(string(doc), check.Equals, expectedDoc)
+
+ // Unmarshalling test
+ conf2 := &s3.LifecycleConfiguration{}
+ err = xml.Unmarshal(doc, conf2)
+ c.Check(err, check.IsNil)
+ s.checkLifecycleConfigurationEqual(c, conf, conf2)
+}
+
+func (s *S) checkLifecycleConfigurationEqual(c *check.C, conf, conf2 *s3.LifecycleConfiguration) {
+ c.Check(len(*conf2.Rules), check.Equals, len(*conf.Rules))
+ for i, rule := range *conf2.Rules {
+ confRules := *conf.Rules
+ c.Check(rule, check.DeepEquals, confRules[i])
+ }
+}
+
+func (s *S) checkLifecycleRequest(c *check.C, req *http.Request) {
+ // ?lifecycle= is the only query param
+ v, ok := req.Form["lifecycle"]
+ c.Assert(ok, check.Equals, true)
+ c.Assert(v, check.HasLen, 1)
+ c.Assert(v[0], check.Equals, "")
+
+ c.Assert(req.Header["X-Amz-Date"], check.HasLen, 1)
+ c.Assert(req.Header["X-Amz-Date"][0], check.Not(check.Equals), "")
+
+ // Lifecycle methods require V4 auth
+ usesV4 := strings.HasPrefix(req.Header["Authorization"][0], "AWS4-HMAC-SHA256")
+ c.Assert(usesV4, check.Equals, true)
+}
+
+func (s *S) TestPutLifecycleConfiguration(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ conf := &s3.LifecycleConfiguration{}
+ rule := s3.NewLifecycleRule("id", "")
+ rule.SetTransitionDays(7)
+ conf.AddRule(rule)
+
+ doc, err := xml.Marshal(conf)
+ c.Check(err, check.IsNil)
+
+ b := s.s3.Bucket("bucket")
+ err = b.PutLifecycleConfiguration(conf)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ c.Assert(req.Header["Content-Md5"], check.HasLen, 1)
+ c.Assert(req.Header["Content-Md5"][0], check.Not(check.Equals), "")
+ s.checkLifecycleRequest(c, req)
+
+ // Check we sent the correct xml serialization
+ data, err := ioutil.ReadAll(req.Body)
+ req.Body.Close()
+ c.Assert(err, check.IsNil)
+ header := "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ c.Assert(string(data), check.Equals, header+string(doc))
+}
+
+func (s *S) TestGetLifecycleConfiguration(c *check.C) {
+ conf := &s3.LifecycleConfiguration{}
+ rule := s3.NewLifecycleRule("id", "")
+ rule.SetTransitionDays(7)
+ conf.AddRule(rule)
+
+ doc, err := xml.Marshal(conf)
+ c.Check(err, check.IsNil)
+
+ testServer.Response(200, nil, string(doc))
+
+ b := s.s3.Bucket("bucket")
+ conf2, err := b.GetLifecycleConfiguration()
+ c.Check(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ s.checkLifecycleRequest(c, req)
+ s.checkLifecycleConfigurationEqual(c, conf, conf2)
+}
+
+func (s *S) TestDeleteLifecycleConfiguration(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ err := b.DeleteLifecycleConfiguration()
+ c.Check(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "DELETE")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ s.checkLifecycleRequest(c, req)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi.go
new file mode 100644
index 00000000..6799ca51
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi.go
@@ -0,0 +1,464 @@
+package s3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "errors"
+ "io"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Multi represents an unfinished multipart upload.
+//
+// Multipart uploads allow sending big objects in smaller chunks.
+// After all parts have been sent, the upload must be explicitly
+// completed by calling Complete with the list of parts.
+//
+// See http://goo.gl/vJfTG for an overview of multipart uploads.
+type Multi struct {
+ Bucket *Bucket
+ Key string
+ UploadId string
+}
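+
+// A typical upload flow (sketch; the bucket, key, and reader names are
+// hypothetical) initiates the upload, sends each part, and completes it:
+//
+//	multi, err := bucket.InitMulti("big/object", "application/octet-stream", Private, Options{})
+//	if err != nil { /* handle error */ }
+//	part, err := multi.PutPart(1, partReader) // partReader is an io.ReadSeeker
+//	if err != nil { /* handle error */ }
+//	err = multi.Complete([]Part{part})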
+
+// listMultiMax mirrors the server-side default for max-uploads; it is a
+// variable only so tests can lower it.
+var listMultiMax = 1000
+
+type listMultiResp struct {
+ NextKeyMarker string
+ NextUploadIdMarker string
+ IsTruncated bool
+ Upload []Multi
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+}
+
+// ListMulti returns the list of unfinished multipart uploads in b.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix. You can use prefixes to separate a bucket into different
+// groupings of keys (to get the feeling of folders, for example).
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+// See http://goo.gl/ePioY for details.
+func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
+ params := map[string][]string{
+ "uploads": {""},
+ "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
+ "prefix": {prefix},
+ "delimiter": {delim},
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ req := &request{
+ method: "GET",
+ bucket: b.Name,
+ params: params,
+ }
+ var resp listMultiResp
+ err := b.S3.query(req, &resp)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ for i := range resp.Upload {
+ multi := &resp.Upload[i]
+ multi.Bucket = b
+ multis = append(multis, multi)
+ }
+ prefixes = append(prefixes, resp.CommonPrefixes...)
+ if !resp.IsTruncated {
+ return multis, prefixes, nil
+ }
+ params["key-marker"] = []string{resp.NextKeyMarker}
+ params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
+ attempt = attempts.Start() // Last request worked.
+ }
+ panic("unreachable")
+}
+
+// Multi returns a multipart upload handler for the provided key
+// inside b. If a multipart upload exists for key, it is returned,
+// otherwise a new multipart upload is initiated with contType and perm.
+func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) {
+ multis, _, err := b.ListMulti(key, "")
+ if err != nil && !hasCode(err, "NoSuchUpload") {
+ return nil, err
+ }
+ for _, m := range multis {
+ if m.Key == key {
+ return m, nil
+ }
+ }
+ return b.InitMulti(key, contType, perm, options)
+}
+
+// InitMulti initializes a new multipart upload at the provided
+// key inside b and returns a value for manipulating it.
+//
+// See http://goo.gl/XP8kL for details.
+func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
+ headers := map[string][]string{
+ "Content-Type": {contType},
+ "Content-Length": {"0"},
+ "x-amz-acl": {string(perm)},
+ }
+ options.addHeaders(headers)
+ params := map[string][]string{
+ "uploads": {""},
+ }
+ req := &request{
+ method: "POST",
+ bucket: b.Name,
+ path: key,
+ headers: headers,
+ params: params,
+ }
+ var err error
+ var resp struct {
+ UploadId string `xml:"UploadId"`
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ err = b.S3.query(req, &resp)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
+}
+
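+// PutPartCopy copies part n of the multipart upload from an existing object,
+// where source takes the form "sourcebucket/key". The returned Part is sized
+// from a HEAD request against the source object.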
+func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
+ headers := map[string][]string{
+ "x-amz-copy-source": {url.QueryEscape(source)},
+ }
+ options.addHeaders(headers)
+ params := map[string][]string{
+ "uploadId": {m.UploadId},
+ "partNumber": {strconv.FormatInt(int64(n), 10)},
+ }
+
+ sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/"))
+ sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil)
+ if err != nil {
+ return nil, Part{}, err
+ }
+
+ for attempt := attempts.Start(); attempt.Next(); {
+ req := &request{
+ method: "PUT",
+ bucket: m.Bucket.Name,
+ path: m.Key,
+ headers: headers,
+ params: params,
+ }
+ resp := &CopyObjectResult{}
+ err := m.Bucket.S3.query(req, resp)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, Part{}, err
+ }
+ if resp.ETag == "" {
+ return nil, Part{}, errors.New("part upload succeeded with no ETag")
+ }
+ return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil
+ }
+ panic("unreachable")
+}
+
+// PutPart sends part n of the multipart upload, reading all the content from r.
+// Each part, except for the last one, must be at least 5MB in size.
+//
+// See http://goo.gl/pqZer for details.
+func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
+ partSize, _, md5b64, err := seekerInfo(r)
+ if err != nil {
+ return Part{}, err
+ }
+ return m.putPart(n, r, partSize, md5b64)
+}
+
+func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(partSize, 10)},
+ "Content-MD5": {md5b64},
+ }
+ params := map[string][]string{
+ "uploadId": {m.UploadId},
+ "partNumber": {strconv.FormatInt(int64(n), 10)},
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ _, err := r.Seek(0, 0)
+ if err != nil {
+ return Part{}, err
+ }
+ req := &request{
+ method: "PUT",
+ bucket: m.Bucket.Name,
+ path: m.Key,
+ headers: headers,
+ params: params,
+ payload: r,
+ }
+ err = m.Bucket.S3.prepare(req)
+ if err != nil {
+ return Part{}, err
+ }
+ resp, err := m.Bucket.S3.run(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return Part{}, err
+ }
+ etag := resp.Header.Get("ETag")
+ if etag == "" {
+ return Part{}, errors.New("part upload succeeded with no ETag")
+ }
+ return Part{n, etag, partSize}, nil
+ }
+ panic("unreachable")
+}
+
+func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
+ _, err = r.Seek(0, 0)
+ if err != nil {
+ return 0, "", "", err
+ }
+ digest := md5.New()
+ size, err = io.Copy(digest, r)
+ if err != nil {
+ return 0, "", "", err
+ }
+ sum := digest.Sum(nil)
+ md5hex = hex.EncodeToString(sum)
+ md5b64 = base64.StdEncoding.EncodeToString(sum)
+ return size, md5hex, md5b64, nil
+}
+
+type Part struct {
+ N int `xml:"PartNumber"`
+ ETag string
+ Size int64
+}
+
+type partSlice []Part
+
+func (s partSlice) Len() int { return len(s) }
+func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
+func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type listPartsResp struct {
+ NextPartNumberMarker string
+ IsTruncated bool
+ Part []Part
+}
+
+// listPartsMax mirrors the server-side default for max-parts; it is a
+// variable only so tests can lower it.
+var listPartsMax = 1000
+
+// ListParts is kept for backwards compatibility. See the documentation for
+// ListPartsFull.
+func (m *Multi) ListParts() ([]Part, error) {
+ return m.ListPartsFull(0, listPartsMax)
+}
+
+// ListPartsFull returns the list of previously uploaded parts in m,
+// ordered by part number. Only parts with part numbers greater than
+// partNumberMarker are listed, and at most maxParts parts are returned.
+//
+// See http://goo.gl/ePioY for details.
+func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
+ if maxParts > listPartsMax {
+ maxParts = listPartsMax
+ }
+
+ params := map[string][]string{
+ "uploadId": {m.UploadId},
+ "max-parts": {strconv.FormatInt(int64(maxParts), 10)},
+ "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)},
+ }
+ var parts partSlice
+ for attempt := attempts.Start(); attempt.Next(); {
+ req := &request{
+ method: "GET",
+ bucket: m.Bucket.Name,
+ path: m.Key,
+ params: params,
+ }
+ var resp listPartsResp
+ err := m.Bucket.S3.query(req, &resp)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ parts = append(parts, resp.Part...)
+ if !resp.IsTruncated {
+ sort.Sort(parts)
+ return parts, nil
+ }
+ params["part-number-marker"] = []string{resp.NextPartNumberMarker}
+ attempt = attempts.Start() // Last request worked.
+ }
+ panic("unreachable")
+}
+
+type ReaderAtSeeker interface {
+ io.ReaderAt
+ io.ReadSeeker
+}
+
+// PutAll sends all of r via a multipart upload with parts no larger
+// than partSize bytes, which must be set to at least 5MB.
+// Parts previously uploaded are either reused if their checksum
+// and size match the new part, or otherwise overwritten with the
+// new content.
+// PutAll returns all the parts of m (reused or not).
+func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
+ old, err := m.ListParts()
+ if err != nil && !hasCode(err, "NoSuchUpload") {
+ return nil, err
+ }
+ reuse := 0 // Index of next old part to consider reusing.
+ current := 1 // Part number of latest good part handled.
+ totalSize, err := r.Seek(0, 2)
+ if err != nil {
+ return nil, err
+ }
+ first := true // Must send at least one empty part if the file is empty.
+ var result []Part
+NextSection:
+ for offset := int64(0); offset < totalSize || first; offset += partSize {
+ first = false
+ if offset+partSize > totalSize {
+ partSize = totalSize - offset
+ }
+ section := io.NewSectionReader(r, offset, partSize)
+ _, md5hex, md5b64, err := seekerInfo(section)
+ if err != nil {
+ return nil, err
+ }
+ for reuse < len(old) && old[reuse].N <= current {
+ // Looks like this part was already sent.
+ part := &old[reuse]
+ etag := `"` + md5hex + `"`
+ if part.N == current && part.Size == partSize && part.ETag == etag {
+ // Checksum matches. Reuse the old part.
+ result = append(result, *part)
+ current++
+ continue NextSection
+ }
+ reuse++
+ }
+
+ // Part wasn't found or doesn't match. Send it.
+ part, err := m.putPart(current, section, partSize, md5b64)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, part)
+ current++
+ }
+ return result, nil
+}
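+
+// For example (sketch; the file name is hypothetical), uploading a local
+// file in 5MB parts, reusing any matching parts already on the server:
+//
+//	f, err := os.Open("backup.tar") // *os.File satisfies ReaderAtSeeker
+//	if err != nil { /* handle error */ }
+//	parts, err := multi.PutAll(f, 5*1024*1024)
+//	if err != nil { /* handle error */ }
+//	err = multi.Complete(parts)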
+
+type completeUpload struct {
+ XMLName xml.Name `xml:"CompleteMultipartUpload"`
+ Parts completeParts `xml:"Part"`
+}
+
+type completePart struct {
+ PartNumber int
+ ETag string
+}
+
+type completeParts []completePart
+
+func (p completeParts) Len() int { return len(p) }
+func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
+func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// Complete assembles the given previously uploaded parts into the
+// final object. This operation may take several minutes.
+//
+// See http://goo.gl/2Z7Tw for details.
+func (m *Multi) Complete(parts []Part) error {
+ params := map[string][]string{
+ "uploadId": {m.UploadId},
+ }
+ c := completeUpload{}
+ for _, p := range parts {
+ c.Parts = append(c.Parts, completePart{p.N, p.ETag})
+ }
+ sort.Sort(c.Parts)
+ data, err := xml.Marshal(&c)
+ if err != nil {
+ return err
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ req := &request{
+ method: "POST",
+ bucket: m.Bucket.Name,
+ path: m.Key,
+ params: params,
+ payload: bytes.NewReader(data),
+ }
+ err := m.Bucket.S3.query(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ return err
+ }
+ panic("unreachable")
+}
+
+// Abort deletes an unfinished multipart upload and any previously
+// uploaded parts for it.
+//
+// After a multipart upload is aborted, no additional parts can be
+// uploaded using it. However, if any part uploads are currently in
+// progress, those part uploads might or might not succeed. As a result,
+// it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// NOTE: If the described scenario happens to you, please report back to
+// the goamz authors with details. In the future such retrying should be
+// handled internally, but it's not clear what happens precisely (Is an
+// error returned? Is the issue completely undetectable?).
+//
+// See http://goo.gl/dnyJw for details.
+func (m *Multi) Abort() error {
+ params := map[string][]string{
+ "uploadId": {m.UploadId},
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ req := &request{
+ method: "DELETE",
+ bucket: m.Bucket.Name,
+ path: m.Key,
+ params: params,
+ }
+ err := m.Bucket.S3.query(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ return err
+ }
+ panic("unreachable")
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi_test.go
new file mode 100644
index 00000000..eadfadec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/multi_test.go
@@ -0,0 +1,425 @@
+package s3_test
+
+import (
+ "encoding/xml"
+ "github.com/crowdmob/goamz/s3"
+ "gopkg.in/check.v1"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+func (s *S) TestInitMulti(c *check.C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ b := s.s3.Bucket("sample")
+
+ metadata := make(map[string][]string)
+ metadata["key1"] = []string{"value1"}
+ metadata["key2"] = []string{"value2"}
+ options := s3.Options{
+ SSE: true,
+ Meta: metadata,
+ ContentEncoding: "text/utf8",
+ CacheControl: "no-cache",
+ RedirectLocation: "http://github.com/crowdmob/goamz",
+ ContentMD5: "0000000000000000",
+ }
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, options)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "POST")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"text/plain"})
+ c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
+ c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
+
+ c.Assert(req.Header["X-Amz-Server-Side-Encryption"], check.DeepEquals, []string{"AES256"})
+ c.Assert(req.Header["Content-Encoding"], check.DeepEquals, []string{"text/utf8"})
+ c.Assert(req.Header["Cache-Control"], check.DeepEquals, []string{"no-cache"})
+ c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"0000000000000000"})
+ c.Assert(req.Header["X-Amz-Website-Redirect-Location"], check.DeepEquals, []string{"http://github.com/crowdmob/goamz"})
+ c.Assert(req.Header["X-Amz-Meta-Key1"], check.DeepEquals, []string{"value1"})
+ c.Assert(req.Header["X-Amz-Meta-Key2"], check.DeepEquals, []string{"value2"})
+
+ c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestMultiNoPreviousUpload(c *check.C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, nil, InitMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.Multi("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/")
+ c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi"})
+
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "POST")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
+
+ c.Assert(multi.UploadId, check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestMultiReturnOld(c *check.C) {
+ testServer.Response(200, nil, ListMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.Multi("multi1", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ c.Assert(multi.Key, check.Equals, "multi1")
+ c.Assert(multi.UploadId, check.Equals, "iUVug89pPvSswrikD")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/")
+ c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], check.DeepEquals, []string{"multi1"})
+}
+
+func (s *S) TestListParts(c *check.C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, ListPartsResultDump1)
+ testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
+ testServer.Response(200, nil, ListPartsResultDump2)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ parts, err := multi.ListParts()
+ c.Assert(err, check.IsNil)
+ c.Assert(parts, check.HasLen, 3)
+ c.Assert(parts[0].N, check.Equals, 1)
+ c.Assert(parts[0].Size, check.Equals, int64(5))
+ c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
+ c.Assert(parts[1].N, check.Equals, 2)
+ c.Assert(parts[1].Size, check.Equals, int64(5))
+ c.Assert(parts[1].ETag, check.Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
+ c.Assert(parts[2].N, check.Equals, 3)
+ c.Assert(parts[2].Size, check.Equals, int64(5))
+ c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`)
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"})
+
+ testServer.WaitRequest() // The internal error.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["max-parts"], check.DeepEquals, []string{"1000"})
+ c.Assert(req.Form["part-number-marker"], check.DeepEquals, []string{"2"})
+}
+
+func (s *S) TestPutPart(c *check.C) {
+ headers := map[string]string{
+ "ETag": `"26f90efd10d614f100252ff56d88dad8"`,
+ }
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, headers, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ part, err := multi.PutPart(1, strings.NewReader(""))
+ c.Assert(err, check.IsNil)
+ c.Assert(part.N, check.Equals, 1)
+ c.Assert(part.Size, check.Equals, int64(8))
+ c.Assert(part.ETag, check.Equals, headers["ETag"])
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"8"})
+ c.Assert(req.Header["Content-Md5"], check.DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
+}
+
+func (s *S) TestPutPartCopy(c *check.C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ // PutPartCopy makes a Head request internally to verify access to the source object
+ // and obtain its size
+ testServer.Response(200, nil, "content")
+ testServer.Response(200, nil, PutCopyResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ res, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, "source-bucket/\u00FCber-fil\u00E9.jpg")
+ c.Assert(err, check.IsNil)
+ c.Assert(part.N, check.Equals, 1)
+ c.Assert(part.Size, check.Equals, int64(7))
+ c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{
+ ETag: `"9b2cf535f27731c974343645a3985328"`,
+ LastModified: `2009-10-28T22:32:00`})
+
+ // Verify the InitMulti request; the Head request is consumed below.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "POST")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+ c.Assert(err, check.IsNil)
+
+ testServer.WaitRequest() // Skip the Head request made to size the source.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
+ c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`})
+}
+
+func readAll(r io.Reader) string {
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+func (s *S) TestPutAllNoPreviousUpload(c *check.C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ etag1 := map[string]string{"ETag": `"etag1"`}
+ etag2 := map[string]string{"ETag": `"etag2"`}
+ etag3 := map[string]string{"ETag": `"etag3"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, etag1, "")
+ testServer.Response(200, etag2, "")
+ testServer.Response(200, etag3, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
+ c.Assert(parts, check.HasLen, 3)
+ c.Assert(parts[0].ETag, check.Equals, `"etag1"`)
+ c.Assert(parts[1].ETag, check.Equals, `"etag2"`)
+ c.Assert(parts[2].ETag, check.Equals, `"etag3"`)
+ c.Assert(err, check.IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts. Won't find anything.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+
+ // Send part 1.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), check.Equals, "part1")
+
+ // Send part 2.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), check.Equals, "part2")
+
+ // Send part 3 with shorter body.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"3"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"4"})
+ c.Assert(readAll(req.Body), check.Equals, "last")
+}
+
+func (s *S) TestPutAllZeroSizeFile(c *check.C) {
+ // Don't retry the NoSuchUpload error.
+ s.DisableRetries()
+
+ etag1 := map[string]string{"ETag": `"etag1"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(404, nil, NoSuchUploadErrorDump)
+ testServer.Response(200, etag1, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ // Must send at least one part, so that completing it will work.
+ parts, err := multi.PutAll(strings.NewReader(""), 5)
+ c.Assert(parts, check.HasLen, 1)
+ c.Assert(parts[0].ETag, check.Equals, `"etag1"`)
+ c.Assert(err, check.IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts. Won't find anything.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+
+ // Send empty part.
+ req = testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"1"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"})
+ c.Assert(readAll(req.Body), check.Equals, "")
+}
+
+func (s *S) TestPutAllResume(c *check.C) {
+ etag2 := map[string]string{"ETag": `"etag2"`}
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, ListPartsResultDump1)
+ testServer.Response(200, nil, ListPartsResultDump2)
+ testServer.Response(200, etag2, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ // "part1" and "part3" match the checksums in ResultDump1.
+ // The middle one is a mismatch (it refers to "part2").
+ parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
+ c.Assert(parts, check.HasLen, 3)
+ c.Assert(parts[0].N, check.Equals, 1)
+ c.Assert(parts[0].Size, check.Equals, int64(5))
+ c.Assert(parts[0].ETag, check.Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
+ c.Assert(parts[1].N, check.Equals, 2)
+ c.Assert(parts[1].Size, check.Equals, int64(5))
+ c.Assert(parts[1].ETag, check.Equals, `"etag2"`)
+ c.Assert(parts[2].N, check.Equals, 3)
+ c.Assert(parts[2].Size, check.Equals, int64(5))
+ c.Assert(parts[2].ETag, check.Equals, `"49dcd91231f801159e893fb5c6674985"`)
+ c.Assert(err, check.IsNil)
+
+ // Init
+ testServer.WaitRequest()
+
+ // List old parts, broken in two requests.
+ for i := 0; i < 2; i++ {
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ }
+
+ // Send part 2, as it didn't match the checksum.
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form["partNumber"], check.DeepEquals, []string{"2"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"5"})
+ c.Assert(readAll(req.Body), check.Equals, "partX")
+}
+
+func (s *S) TestMultiComplete(c *check.C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ // Note the 200 response. Completing will hold the connection on some
+ // kind of long poll, and may return a late error even after a 200.
+ testServer.Response(200, nil, InternalErrorDump)
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
+ c.Assert(err, check.IsNil)
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "POST")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+
+ var payload struct {
+ XMLName xml.Name
+ Part []struct {
+ PartNumber int
+ ETag string
+ }
+ }
+
+ dec := xml.NewDecoder(req.Body)
+ err = dec.Decode(&payload)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(payload.XMLName.Local, check.Equals, "CompleteMultipartUpload")
+ c.Assert(len(payload.Part), check.Equals, 2)
+ c.Assert(payload.Part[0].PartNumber, check.Equals, 1)
+ c.Assert(payload.Part[0].ETag, check.Equals, `"ETag1"`)
+ c.Assert(payload.Part[1].PartNumber, check.Equals, 2)
+ c.Assert(payload.Part[1].ETag, check.Equals, `"ETag2"`)
+}
+
+func (s *S) TestMultiAbort(c *check.C) {
+ testServer.Response(200, nil, InitMultiResultDump)
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("sample")
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ err = multi.Abort()
+ c.Assert(err, check.IsNil)
+
+ testServer.WaitRequest()
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "DELETE")
+ c.Assert(req.URL.Path, check.Equals, "/sample/multi")
+ c.Assert(req.Form.Get("uploadId"), check.Matches, "JNbR_[A-Za-z0-9.]+QQ--")
+}
+
+func (s *S) TestListMulti(c *check.C) {
+ testServer.Response(200, nil, ListMultiResultDump)
+
+ b := s.s3.Bucket("sample")
+
+ multis, prefixes, err := b.ListMulti("", "/")
+ c.Assert(err, check.IsNil)
+ c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"})
+ c.Assert(multis, check.HasLen, 2)
+ c.Assert(multis[0].Key, check.Equals, "multi1")
+ c.Assert(multis[0].UploadId, check.Equals, "iUVug89pPvSswrikD")
+ c.Assert(multis[1].Key, check.Equals, "multi2")
+ c.Assert(multis[1].UploadId, check.Equals, "DkirwsSvPp98guVUi")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/sample/")
+ c.Assert(req.Form["uploads"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["prefix"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"})
+ c.Assert(req.Form["max-uploads"], check.DeepEquals, []string{"1000"})
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/responses_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/responses_test.go
new file mode 100644
index 00000000..22478b9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/responses_test.go
@@ -0,0 +1,239 @@
+package s3_test
+
+var PutCopyResultDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
+  <LastModified>2009-10-28T22:32:00</LastModified>
+  <ETag>"9b2cf535f27731c974343645a3985328"</ETag>
+</CopyObjectResult>
+`
+
+var GetObjectErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+  <Code>NoSuchBucket</Code>
+  <Message>The specified bucket does not exist</Message>
+  <BucketName>non-existent-bucket</BucketName>
+  <RequestId>3F1B667FAD71C3D8</RequestId>
+  <HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId>
+</Error>
+`
+
+var GetListResultDump1 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListBucketResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
+  <Name>quotes</Name>
+  <Prefix>N</Prefix>
+  <IsTruncated>false</IsTruncated>
+  <Contents>
+    <Key>Nelson</Key>
+    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
+    <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
+    <Size>5</Size>
+    <StorageClass>STANDARD</StorageClass>
+    <Owner>
+      <ID>bcaf161ca5fb16fd081034f</ID>
+      <DisplayName>webfile</DisplayName>
+    </Owner>
+  </Contents>
+  <Contents>
+    <Key>Neo</Key>
+    <LastModified>2006-01-01T12:00:00.000Z</LastModified>
+    <ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
+    <Size>4</Size>
+    <StorageClass>STANDARD</StorageClass>
+    <Owner>
+      <ID>bcaf1ffd86a5fb16fd081034f</ID>
+      <DisplayName>webfile</DisplayName>
+    </Owner>
+  </Contents>
+</ListBucketResult>
+`
+
+var GetListResultDump2 = `
+<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Name>example-bucket</Name>
+  <Prefix>photos/2006/</Prefix>
+  <Marker>some-marker</Marker>
+  <MaxKeys>1000</MaxKeys>
+  <Delimiter>/</Delimiter>
+  <IsTruncated>false</IsTruncated>
+  <CommonPrefixes>
+    <Prefix>photos/2006/feb/</Prefix>
+  </CommonPrefixes>
+  <CommonPrefixes>
+    <Prefix>photos/2006/jan/</Prefix>
+  </CommonPrefixes>
+</ListBucketResult>
+`
+
+var InitMultiResultDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>sample</Bucket>
+  <Key>multi</Key>
+  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+</InitiateMultipartUploadResult>
+`
+
+var ListPartsResultDump1 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>sample</Bucket>
+  <Key>multi</Key>
+  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+  <Initiator>
+    <ID>bb5c0f63b0b25f2d099c</ID>
+    <DisplayName>joe</DisplayName>
+  </Initiator>
+  <Owner>
+    <ID>bb5c0f63b0b25f2d099c</ID>
+    <DisplayName>joe</DisplayName>
+  </Owner>
+  <StorageClass>STANDARD</StorageClass>
+  <PartNumberMarker>0</PartNumberMarker>
+  <NextPartNumberMarker>2</NextPartNumberMarker>
+  <MaxParts>2</MaxParts>
+  <IsTruncated>true</IsTruncated>
+  <Part>
+    <PartNumber>1</PartNumber>
+    <LastModified>2013-01-30T13:45:51.000Z</LastModified>
+    <ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
+    <Size>5</Size>
+  </Part>
+  <Part>
+    <PartNumber>2</PartNumber>
+    <LastModified>2013-01-30T13:45:52.000Z</LastModified>
+    <ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
+    <Size>5</Size>
+  </Part>
+</ListPartsResult>
+`
+
+var ListPartsResultDump2 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>sample</Bucket>
+  <Key>multi</Key>
+  <UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
+  <Initiator>
+    <ID>bb5c0f63b0b25f2d099c</ID>
+    <DisplayName>joe</DisplayName>
+  </Initiator>
+  <Owner>
+    <ID>bb5c0f63b0b25f2d099c</ID>
+    <DisplayName>joe</DisplayName>
+  </Owner>
+  <StorageClass>STANDARD</StorageClass>
+  <PartNumberMarker>2</PartNumberMarker>
+  <NextPartNumberMarker>3</NextPartNumberMarker>
+  <MaxParts>2</MaxParts>
+  <IsTruncated>false</IsTruncated>
+  <Part>
+    <PartNumber>3</PartNumber>
+    <LastModified>2013-01-30T13:46:50.000Z</LastModified>
+    <ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
+    <Size>5</Size>
+  </Part>
+</ListPartsResult>
+`
+
+var ListMultiResultDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
+  <KeyMarker></KeyMarker>
+  <UploadIdMarker></UploadIdMarker>
+  <NextKeyMarker>multi1</NextKeyMarker>
+  <NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
+  <Delimiter>/</Delimiter>
+  <MaxUploads>1000</MaxUploads>
+  <IsTruncated>false</IsTruncated>
+  <Upload>
+    <Key>multi1</Key>
+    <UploadId>iUVug89pPvSswrikD</UploadId>
+    <Initiator>
+      <ID>bb5c0f63b0b25f2d0</ID>
+      <DisplayName>gustavoniemeyer</DisplayName>
+    </Initiator>
+    <Owner>
+      <ID>bb5c0f63b0b25f2d0</ID>
+      <DisplayName>gustavoniemeyer</DisplayName>
+    </Owner>
+    <StorageClass>STANDARD</StorageClass>
+    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
+  </Upload>
+  <Upload>
+    <Key>multi2</Key>
+    <UploadId>DkirwsSvPp98guVUi</UploadId>
+    <Initiator>
+      <ID>bb5c0f63b0b25f2d0</ID>
+      <DisplayName>joe</DisplayName>
+    </Initiator>
+    <Owner>
+      <ID>bb5c0f63b0b25f2d0</ID>
+      <DisplayName>joe</DisplayName>
+    </Owner>
+    <StorageClass>STANDARD</StorageClass>
+    <Initiated>2013-01-30T18:15:47.000Z</Initiated>
+  </Upload>
+  <CommonPrefixes>
+    <Prefix>a/</Prefix>
+  </CommonPrefixes>
+  <CommonPrefixes>
+    <Prefix>b/</Prefix>
+  </CommonPrefixes>
+</ListMultipartUploadsResult>
+`
+
+var NoSuchUploadErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+  <Code>NoSuchUpload</Code>
+  <Message>Not relevant</Message>
+  <BucketName>sample</BucketName>
+  <RequestId>3F1B667FAD71C3D8</RequestId>
+  <HostId>kjhwqk</HostId>
+</Error>
+`
+
+var InternalErrorDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+  <Code>InternalError</Code>
+  <Message>Not relevant</Message>
+  <BucketName>sample</BucketName>
+  <RequestId>3F1B667FAD71C3D8</RequestId>
+  <HostId>kjhwqk</HostId>
+</Error>
+`
+
+var GetServiceDump = `
+<?xml version="1.0" encoding="UTF-8"?>
+<ListAllMyBucketsResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
+  <Owner>
+    <ID>bcaf1ffd86f461ca5fb16fd081034f</ID>
+    <DisplayName>webfile</DisplayName>
+  </Owner>
+  <Buckets>
+    <Bucket>
+      <Name>quotes</Name>
+      <CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
+    </Bucket>
+    <Bucket>
+      <Name>samples</Name>
+      <CreationDate>2006-02-03T16:41:58.000Z</CreationDate>
+    </Bucket>
+  </Buckets>
+</ListAllMyBucketsResult>
+`
+
+var GetLocationUsStandard = `
+<?xml version="1.0" encoding="UTF-8"?>
+<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></LocationConstraint>
+`
+
+var GetLocationUsWest1 = `
+<?xml version="1.0" encoding="UTF-8"?>
+<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-west-1</LocationConstraint>
+`
+
+var BucketWebsiteConfigurationDump = `
+<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><RedirectAllRequestsTo><HostName>example.com</HostName></RedirectAllRequestsTo></WebsiteConfiguration>`
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3.go
new file mode 100644
index 00000000..b51f4078
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3.go
@@ -0,0 +1,1204 @@
+//
+// goamz - Go packages to interact with the Amazon Web Services.
+//
+// https://wiki.ubuntu.com/goamz
+//
+// Copyright (c) 2011 Canonical Ltd.
+//
+// Written by Gustavo Niemeyer
+//
+
+package s3
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/crowdmob/goamz/aws"
+)
+
+const debug = false
+
+// The S3 type encapsulates operations with an S3 region.
+type S3 struct {
+ aws.Auth
+ aws.Region
+ ConnectTimeout time.Duration
+ ReadTimeout time.Duration
+ private byte // Reserve the right of using private data.
+}
+
+// The Bucket type encapsulates operations with an S3 bucket.
+type Bucket struct {
+ *S3
+ Name string
+}
+
+// The Owner type represents the owner of the object in an S3 bucket.
+type Owner struct {
+ ID string
+ DisplayName string
+}
+
+// Options holds optional parameters for S3 requests; addHeaders folds the
+// set fields into request headers.
+type Options struct {
+ SSE bool
+ SSECustomerAlgorithm string
+ SSECustomerKey string
+ SSECustomerKeyMD5 string
+ Meta map[string][]string
+ ContentEncoding string
+ CacheControl string
+ RedirectLocation string
+ ContentMD5 string
+ ContentDisposition string
+ Range string
+ // TODO: support further options that become headers, e.g. x-amz-storage-class.
+}
+
+type CopyOptions struct {
+ Options
+ CopySourceOptions string
+ MetadataDirective string
+ ContentType string
+}
+
+// CopyObjectResult is the output from a Copy request
+type CopyObjectResult struct {
+ ETag string
+ LastModified string
+}
+
+var attempts = aws.AttemptStrategy{
+ Min: 5,
+ Total: 5 * time.Second,
+ Delay: 200 * time.Millisecond,
+}
+
+// New creates a new S3.
+func New(auth aws.Auth, region aws.Region) *S3 {
+ return &S3{auth, region, 0, 0, 0}
+}
+
+// Bucket returns a Bucket with the given name.
+func (s3 *S3) Bucket(name string) *Bucket {
+ if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
+ name = strings.ToLower(name)
+ }
+ return &Bucket{s3, name}
+}
+
+type BucketInfo struct {
+ Name string
+ CreationDate string
+}
+
+type GetServiceResp struct {
+ Owner Owner
+ Buckets []BucketInfo `xml:">Bucket"`
+}
+
+// GetService gets a list of all buckets owned by an account.
+//
+// See http://goo.gl/wbHkGj for details.
+func (s3 *S3) GetService() (*GetServiceResp, error) {
+ bucket := s3.Bucket("")
+
+ r, err := bucket.Get("")
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse the XML response.
+ var resp GetServiceResp
+ if err = xml.Unmarshal(r, &resp); err != nil {
+ return nil, err
+ }
+
+ return &resp, nil
+}
+
+var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+  <LocationConstraint>%s</LocationConstraint>
+</CreateBucketConfiguration>`
+
+// locationConstraint returns an io.Reader specifying a LocationConstraint if
+// required for the region.
+//
+// See http://goo.gl/bh9Kq for details.
+func (s3 *S3) locationConstraint() io.Reader {
+ constraint := ""
+ if s3.Region.S3LocationConstraint {
+ constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
+ }
+ return strings.NewReader(constraint)
+}
+
+type ACL string
+
+const (
+ Private = ACL("private")
+ PublicRead = ACL("public-read")
+ PublicReadWrite = ACL("public-read-write")
+ AuthenticatedRead = ACL("authenticated-read")
+ BucketOwnerRead = ACL("bucket-owner-read")
+ BucketOwnerFull = ACL("bucket-owner-full-control")
+)
+
+// PutBucket creates a new bucket.
+//
+// See http://goo.gl/ndjnR for details.
+func (b *Bucket) PutBucket(perm ACL) error {
+ headers := map[string][]string{
+ "x-amz-acl": {string(perm)},
+ }
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: "/",
+ headers: headers,
+ payload: b.locationConstraint(),
+ }
+ return b.S3.query(req, nil)
+}
+
+// DelBucket removes an existing S3 bucket. All objects in the bucket must
+// be removed before the bucket itself can be removed.
+//
+// See http://goo.gl/GoBrY for details.
+func (b *Bucket) DelBucket() (err error) {
+ req := &request{
+ method: "DELETE",
+ bucket: b.Name,
+ path: "/",
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ err = b.S3.query(req, nil)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ return err
+}
+
+// Get retrieves an object from an S3 bucket.
+//
+// See http://goo.gl/isCO7 for details.
+func (b *Bucket) Get(path string) (data []byte, err error) {
+ body, err := b.GetReader(path)
+ if err != nil {
+ return nil, err
+ }
+ data, err = ioutil.ReadAll(body)
+ body.Close()
+ return data, err
+}
+
+// GetReader retrieves an object from an S3 bucket,
+// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading.
+func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
+ resp, err := b.GetResponse(path)
+ if resp != nil {
+ return resp.Body, err
+ }
+ return nil, err
+}
+
+// GetResponse retrieves an object from an S3 bucket, returning the HTTP
+// response. It is the caller's responsibility to call Close on resp.Body
+// when finished reading.
+func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
+ return b.GetResponseWithHeaders(path, make(http.Header))
+}
+
+// GetResponseWithHeaders retrieves an object from an S3 bucket, sending the
+// given custom headers with the request and returning the HTTP response.
+// It is the caller's responsibility to call Close on resp.Body when
+// finished reading.
+func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ err = b.S3.prepare(req)
+ if err != nil {
+ return nil, err
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+ }
+ panic("unreachable")
+}
+
+// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
+func (b *Bucket) Exists(path string) (exists bool, err error) {
+ req := &request{
+ method: "HEAD",
+ bucket: b.Name,
+ path: path,
+ }
+ err = b.S3.prepare(req)
+ if err != nil {
+ return
+ }
+ for attempt := attempts.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+
+ if err != nil {
+ // We can treat a 403 or 404 as nonexistence.
+ if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if resp.StatusCode/100 == 2 {
+ exists = true
+ }
+ if resp.Body != nil {
+ resp.Body.Close()
+ }
+ return exists, err
+ }
+ return false, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Head HEADs an object in the S3 bucket and returns the response,
+// which has no body. See http://bit.ly/17K1ylI for details.
+func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+ req := &request{
+ method: "HEAD",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ return nil, err
+ }
+
+ for attempt := attempts.Start(); attempt.Next(); {
+ resp, err := b.S3.run(req, nil)
+ if shouldRetry(err) && attempt.HasNext() {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+ }
+ return nil, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Put inserts an object into the S3 bucket.
+//
+// See http://goo.gl/FEBPD for details.
+func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
+ body := bytes.NewBuffer(data)
+ return b.PutReader(path, body, int64(len(data)), contType, perm, options)
+}
+
+// PutCopy copies the object given by source (in "sourcebucket/key" form)
+// into bucket b under the target key path.
+func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
+ headers := map[string][]string{
+ "x-amz-acl": {string(perm)},
+ "x-amz-copy-source": {url.QueryEscape(source)},
+ }
+ options.addHeaders(headers)
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ }
+ resp := &CopyObjectResult{}
+ err := b.S3.query(req, resp)
+ if err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+// PutReader inserts an object into the S3 bucket by consuming data
+// from r until EOF.
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(length, 10)},
+ "Content-Type": {contType},
+ "x-amz-acl": {string(perm)},
+ }
+ options.addHeaders(headers)
+ req := &request{
+ method: "PUT",
+ bucket: b.Name,
+ path: path,
+ headers: headers,
+ payload: r,
+ }
+ return b.S3.query(req, nil)
+}
+
+// addHeaders adds o's specified fields to headers
+func (o Options) addHeaders(headers map[string][]string) {
+ if o.SSE {
+ headers["x-amz-server-side-encryption"] = []string{"AES256"}
+ } else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 {
+ // Amazon-managed keys and customer-managed keys are mutually exclusive
+ headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm}
+ headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey}
+ headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5}
+ }
+ if len(o.Range) != 0 {
+ headers["Range"] = []string{o.Range}
+ }
+ if len(o.ContentEncoding) != 0 {
+ headers["Content-Encoding"] = []string{o.ContentEncoding}
+ }
+ if len(o.CacheControl) != 0 {
+ headers["Cache-Control"] = []string{o.CacheControl}
+ }
+ if len(o.ContentMD5) != 0 {
+ headers["Content-MD5"] = []string{o.ContentMD5}
+ }
+ if len(o.RedirectLocation) != 0 {
+ headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation}
+ }
+ if len(o.ContentDisposition) != 0 {
+ headers["Content-Disposition"] = []string{o.ContentDisposition}
+ }
+ for k, v := range o.Meta {
+ headers["x-amz-meta-"+k] = v
+ }
+}
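+
+// For instance (sketch; the key name is hypothetical), requesting
+// server-side encryption with an Amazon-managed key on a put:
+//
+//	err := bucket.Put("key", data, "text/plain", Private, Options{SSE: true})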
+
+// addHeaders adds o's specified fields to headers
+func (o CopyOptions) addHeaders(headers map[string][]string) {
+ o.Options.addHeaders(headers)
+ if len(o.MetadataDirective) != 0 {
+ headers["x-amz-metadata-directive"] = []string{o.MetadataDirective}
+ }
+ if len(o.CopySourceOptions) != 0 {
+ headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions}
+ }
+ if len(o.ContentType) != 0 {
+ headers["Content-Type"] = []string{o.ContentType}
+ }
+}
+
+func makeXmlBuffer(doc []byte) *bytes.Buffer {
+ buf := new(bytes.Buffer)
+ buf.WriteString(xml.Header)
+ buf.Write(doc)
+ return buf
+}
+
+type IndexDocument struct {
+ Suffix string `xml:"Suffix"`
+}
+
+type ErrorDocument struct {
+ Key string `xml:"Key"`
+}
+
+type RoutingRule struct {
+ ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"`
+ RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
+ RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"`
+}
+
+type RedirectAllRequestsTo struct {
+ HostName string `xml:"HostName"`
+ Protocol string `xml:"Protocol,omitempty"`
+}
+
+type WebsiteConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"`
+ IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"`
+ ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"`
+ RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
+ RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
+}
+
+// PutBucketWebsite configures a bucket as a website.
+//
+// See http://goo.gl/TpRlUy for details.
+func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
+ doc, err := xml.Marshal(configuration)
+ if err != nil {
+ return err
+ }
+
+ buf := makeXmlBuffer(doc)
+
+ return b.PutBucketSubresource("website", buf, int64(buf.Len()))
+}
+
+func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(length, 10)},
+ }
+ req := &request{
+ path: "/",
+ method: "PUT",
+ bucket: b.Name,
+ headers: headers,
+ payload: r,
+ params: url.Values{subresource: {""}},
+ }
+
+ return b.S3.query(req, nil)
+}
+
+// Del removes an object from the S3 bucket.
+//
+// See http://goo.gl/APeTt for details.
+func (b *Bucket) Del(path string) error {
+ req := &request{
+ method: "DELETE",
+ bucket: b.Name,
+ path: path,
+ }
+ return b.S3.query(req, nil)
+}
+
+type Delete struct {
+ Quiet bool `xml:"Quiet,omitempty"`
+ Objects []Object `xml:"Object"`
+}
+
+type Object struct {
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId,omitempty"`
+}
+
+// DelMulti removes up to 1000 objects from the S3 bucket.
+//
+// See http://goo.gl/jx6cWK for details.
+func (b *Bucket) DelMulti(objects Delete) error {
+ doc, err := xml.Marshal(objects)
+ if err != nil {
+ return err
+ }
+
+ buf := makeXmlBuffer(doc)
+ digest := md5.New()
+ size, err := digest.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+
+ headers := map[string][]string{
+ "Content-Length": {strconv.FormatInt(int64(size), 10)},
+ "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
+ "Content-Type": {"text/xml"},
+ }
+ req := &request{
+ path: "/",
+ method: "POST",
+ params: url.Values{"delete": {""}},
+ bucket: b.Name,
+ headers: headers,
+ payload: buf,
+ }
+
+ return b.S3.query(req, nil)
+}
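+
+// For example (sketch; the key names are hypothetical), deleting two objects
+// in a single batched request:
+//
+//	err := bucket.DelMulti(Delete{
+//		Quiet:   true,
+//		Objects: []Object{{Key: "a.txt"}, {Key: "b.txt"}},
+//	})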
+
+// The ListResp type holds the results of a List bucket operation.
+type ListResp struct {
+ Name string
+ Prefix string
+ Delimiter string
+ Marker string
+ MaxKeys int
+ // IsTruncated is true if the results have been truncated because
+ // there are more keys and prefixes than can fit in MaxKeys.
+ // N.B. this is the opposite sense to that documented (incorrectly) in
+ // http://goo.gl/YjQTc
+ IsTruncated bool
+ Contents []Key
+ CommonPrefixes []string `xml:">Prefix"`
+ // if IsTruncated is true, pass NextMarker as marker argument to List()
+ // to get the next set of keys
+ NextMarker string
+}
+
+// The Key type represents an item stored in an S3 bucket.
+type Key struct {
+ Key string
+ LastModified string
+ Size int64
+ // ETag gives the hex-encoded MD5 sum of the contents,
+ // surrounded with double-quotes.
+ ETag string
+ StorageClass string
+ Owner Owner
+}
+
+// List returns information about objects in an S3 bucket.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix.
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+// The marker parameter specifies the key to start with when listing objects
+// in a bucket. Amazon S3 lists objects in alphabetical order and
+// will return keys alphabetically greater than the marker.
+//
+// The max parameter specifies how many keys + common prefixes to return in
+// the response. The default is 1000.
+//
+// For example, given these keys in a bucket:
+//
+// index.html
+// index2.html
+// photos/2006/January/sample.jpg
+// photos/2006/February/sample2.jpg
+// photos/2006/February/sample3.jpg
+// photos/2006/February/sample4.jpg
+//
+// Listing this bucket with delimiter set to "/" would yield the
+// following result:
+//
+// &ListResp{
+// Name: "sample-bucket",
+// MaxKeys: 1000,
+// Delimiter: "/",
+// Contents: []Key{
+//         {Key: "index.html"}, {Key: "index2.html"},
+// },
+// CommonPrefixes: []string{
+// "photos/",
+// },
+// }
+//
+// Listing the same bucket with delimiter set to "/" and prefix set to
+// "photos/2006/" would yield the following result:
+//
+// &ListResp{
+// Name: "sample-bucket",
+// MaxKeys: 1000,
+// Delimiter: "/",
+// Prefix: "photos/2006/",
+// CommonPrefixes: []string{
+// "photos/2006/February/",
+// "photos/2006/January/",
+// },
+// }
+//
+// See http://goo.gl/YjQTc for details.
+func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
+ params := map[string][]string{
+ "prefix": {prefix},
+ "delimiter": {delim},
+ "marker": {marker},
+ }
+ if max != 0 {
+ params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+ }
+ req := &request{
+ bucket: b.Name,
+ params: params,
+ }
+ result = &ListResp{}
+ for attempt := attempts.Start(); attempt.Next(); {
+ err = b.S3.query(req, result)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ // If NextMarker is not returned, set it to the name of the last key,
+ // so that each caller doesn't have to.
+ if result.IsTruncated && result.NextMarker == "" {
+ n := len(result.Contents)
+ if n > 0 {
+ result.NextMarker = result.Contents[n-1].Key
+ }
+ }
+ return result, nil
+}
+
+// The VersionsResp type holds the results of a list bucket Versions operation.
+type VersionsResp struct {
+ Name string
+ Prefix string
+ KeyMarker string
+ VersionIdMarker string
+ MaxKeys int
+ Delimiter string
+ IsTruncated bool
+ Versions []Version `xml:"Version"`
+ CommonPrefixes []string `xml:">Prefix"`
+}
+
+// The Version type represents an object version stored in an S3 bucket.
+type Version struct {
+ Key string
+ VersionId string
+ IsLatest bool
+ LastModified string
+ // ETag gives the hex-encoded MD5 sum of the contents,
+ // surrounded with double-quotes.
+ ETag string
+ Size int64
+ Owner Owner
+ StorageClass string
+}
+
+func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
+ params := map[string][]string{
+ "versions": {""},
+ "prefix": {prefix},
+ "delimiter": {delim},
+ }
+
+ if len(versionIdMarker) != 0 {
+ params["version-id-marker"] = []string{versionIdMarker}
+ }
+ if len(keyMarker) != 0 {
+ params["key-marker"] = []string{keyMarker}
+ }
+
+ if max != 0 {
+ params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+ }
+ req := &request{
+ bucket: b.Name,
+ params: params,
+ }
+ result = &VersionsResp{}
+ for attempt := attempts.Start(); attempt.Next(); {
+ err = b.S3.query(req, result)
+ if !shouldRetry(err) {
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+type GetLocationResp struct {
+ Location string `xml:",innerxml"`
+}
+
+func (b *Bucket) Location() (string, error) {
+ r, err := b.Get("/?location")
+ if err != nil {
+ return "", err
+ }
+
+ // Parse the XML response.
+ var resp GetLocationResp
+ if err = xml.Unmarshal(r, &resp); err != nil {
+ return "", err
+ }
+
+ if resp.Location == "" {
+ return "us-east-1", nil
+ } else {
+ return resp.Location, nil
+ }
+}
+
+// URL returns a non-signed URL that allows retrieving the
+// object at path. It only works if the object is publicly
+// readable (see SignedURL).
+func (b *Bucket) URL(path string) string {
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ panic(err)
+ }
+ u, err := req.url()
+ if err != nil {
+ panic(err)
+ }
+ u.RawQuery = ""
+ return u.String()
+}
+
+// SignedURL returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
+func (b *Bucket) SignedURL(path string, expires time.Time) string {
+ return b.SignedURLWithArgs(path, expires, nil, nil)
+}
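+
+// For example (sketch; the key name is hypothetical), producing a link that
+// is valid for one hour:
+//
+//	url := bucket.SignedURL("private/report.pdf", time.Now().Add(time.Hour))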
+
+// SignedURLWithArgs returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
+func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string {
+ var uv = url.Values{}
+
+ if params != nil {
+ uv = params
+ }
+
+ uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10))
+
+ req := &request{
+ bucket: b.Name,
+ path: path,
+ params: uv,
+ headers: headers,
+ }
+ err := b.S3.prepare(req)
+ if err != nil {
+ panic(err)
+ }
+ u, err := req.url()
+ if err != nil {
+ panic(err)
+ }
+ if b.S3.Auth.Token() != "" {
+ return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
+ } else {
+ return u.String()
+ }
+}
+
+// UploadSignedURL returns a signed URL that allows anyone holding the URL
+// to upload the object at path. The signature is valid until expires.
+// content_type is a MIME type like "image/png"; path is the resource name
+// in S3 terminology, e.g. "images/ali.png" (excluding the bucket name).
+func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
+ expire_date := expires.Unix()
+ if method != "POST" {
+ method = "PUT"
+ }
+ stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path
+ a := b.S3.Auth
+ secretKey := a.SecretKey
+ accessId := a.AccessKey
+ mac := hmac.New(sha1.New, []byte(secretKey))
+ mac.Write([]byte(stringToSign))
+ macsum := mac.Sum(nil)
+ signature := base64.StdEncoding.EncodeToString([]byte(macsum))
+ signature = strings.TrimSpace(signature)
+
+ signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
+ if err != nil {
+ log.Println("ERROR sining url for S3 upload", err)
+ return ""
+ }
+ signedurl.Path += path
+ params := url.Values{}
+ params.Add("AWSAccessKeyId", accessId)
+ params.Add("Expires", strconv.FormatInt(expire_date, 10))
+ params.Add("Signature", signature)
+ if a.Token() != "" {
+ params.Add("token", a.Token())
+ }
+
+ signedurl.RawQuery = params.Encode()
+ return signedurl.String()
+}
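+
+// For reference, the string-to-sign built above has the following shape
+// (placeholder values): the method, an empty Content-MD5 line, the content
+// type, the Unix expiry, and the resource path.
+//
+//	PUT
+//
+//	image/png
+//	1700000000
+//	/my-bucket/images/ali.png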
+
+// PostFormArgsEx returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit.
+// Additional conditions can be specified with conds.
+func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) {
+ conditions := make([]string, 0)
+ fields = map[string]string{
+ "AWSAccessKeyId": b.Auth.AccessKey,
+ "key": path,
+ }
+
+ if conds != nil {
+ conditions = append(conditions, conds...)
+ }
+
+ conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
+ conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
+ if redirect != "" {
+ conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
+ fields["success_action_redirect"] = redirect
+ }
+
+ vExpiration := expires.UTC().Format("2006-01-02T15:04:05Z")
+ vConditions := strings.Join(conditions, ",")
+ policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
+ policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
+ fields["policy"] = policy64
+
+ signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
+ signer.Write([]byte(policy64))
+ fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
+
+ action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
+ return
+}
+
+// PostFormArgs returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit
+func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
+ return b.PostFormArgsEx(path, expires, redirect, nil)
+}
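+
+// An illustrative sketch (not part of the original source) of feeding the
+// returned values into a browser upload form; the key and expiry below are
+// placeholders:
+//
+//	action, fields := b.PostFormArgs("uploads/photo.jpg", time.Now().Add(time.Hour), "")
+//	// Render <form method="post" action="{action}" enctype="multipart/form-data">
+//	// with one hidden <input> per entry in fields, plus the file input itself.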
+
+type request struct {
+ method string
+ bucket string
+ path string
+ params url.Values
+ headers http.Header
+ baseurl string
+ payload io.Reader
+ prepared bool
+}
+
+func (req *request) url() (*url.URL, error) {
+ u, err := url.Parse(req.baseurl)
+ if err != nil {
+ return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
+ }
+ u.RawQuery = req.params.Encode()
+ u.Path = req.path
+ return u, nil
+}
+
+// query prepares and runs the req request.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled into it.
+func (s3 *S3) query(req *request, resp interface{}) error {
+ err := s3.prepare(req)
+ if err != nil {
+ return err
+ }
+ r, err := s3.run(req, resp)
+ if r != nil && r.Body != nil {
+ r.Body.Close()
+ }
+ return err
+}
+
+// queryV4Sign prepares and runs the req request, signed with AWS V4 signatures.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled into it.
+func (s3 *S3) queryV4Sign(req *request, resp interface{}) error {
+ if req.headers == nil {
+ req.headers = map[string][]string{}
+ }
+
+ if err := s3.setBaseURL(req); err != nil {
+ return err
+ }
+
+ hreq, err := s3.setupHttpRequest(req)
+ if err != nil {
+ return err
+ }
+
+ // req.Host must be set for V4 signature calculation
+ hreq.Host = hreq.URL.Host
+
+ signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region)
+ signer.IncludeXAmzContentSha256 = true
+ signer.Sign(hreq)
+
+ _, err = s3.doHttpRequest(hreq, resp)
+ return err
+}
+
+// setBaseURL sets baseurl on req from the bucket name and the region endpoint.
+func (s3 *S3) setBaseURL(req *request) error {
+ if req.bucket == "" {
+ req.baseurl = s3.Region.S3Endpoint
+ } else {
+ req.baseurl = s3.Region.S3BucketEndpoint
+ if req.baseurl == "" {
+ // Use the path method to address the bucket.
+ req.baseurl = s3.Region.S3Endpoint
+ req.path = "/" + req.bucket + req.path
+ } else {
+ // Just in case, prevent injection.
+ if strings.IndexAny(req.bucket, "/:@") >= 0 {
+ return fmt.Errorf("bad S3 bucket: %q", req.bucket)
+ }
+ req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
+ }
+ }
+
+ return nil
+}
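+
+// For example (hypothetical values): with no S3BucketEndpoint configured, a
+// request for bucket "b" and path "/k" is addressed path-style as
+// "https://s3.amazonaws.com/b/k", whereas with
+// S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com" it becomes the
+// virtual-host style "https://b.s3.amazonaws.com/k".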
+
+// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls.
+//
+// Some commands, including:
+// GET Bucket acl http://goo.gl/aoXflF
+// GET Bucket cors http://goo.gl/UlmBdx
+// GET Bucket lifecycle http://goo.gl/8Fme7M
+// GET Bucket policy http://goo.gl/ClXIo3
+// GET Bucket location http://goo.gl/5lh8RD
+// GET Bucket Logging http://goo.gl/sZ5ckF
+// GET Bucket notification http://goo.gl/qSSZKD
+// GET Bucket tagging http://goo.gl/QRvxnM
+// require the first character after the bucket name in the path to be a literal '?' and
+// not the escaped hex representation '%3F'.
+func partiallyEscapedPath(path string) string {
+ pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/")
+ if len(pathEscapedAndSplit) >= 3 {
+ if len(pathEscapedAndSplit[2]) >= 3 {
+ // Check for the one "?" that should not be escaped.
+ if pathEscapedAndSplit[2][0:3] == "%3F" {
+ pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:]
+ }
+ }
+ }
+ return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1)
+}
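+
+// For example, partiallyEscapedPath("/bucket/?acl") yields "/bucket/?acl",
+// keeping the subresource '?' literal, while a '+' in a key, as in
+// "/bucket/a+b", is escaped to give "/bucket/a%2Bb".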
+
+// prepare sets up req to be delivered to S3.
+func (s3 *S3) prepare(req *request) error {
+ var signpath = req.path
+
+ if !req.prepared {
+ req.prepared = true
+ if req.method == "" {
+ req.method = "GET"
+ }
+ // Copy so they can be mutated without affecting retries.
+ params := make(url.Values)
+ headers := make(http.Header)
+ for k, v := range req.params {
+ params[k] = v
+ }
+ for k, v := range req.headers {
+ headers[k] = v
+ }
+ req.params = params
+ req.headers = headers
+ if !strings.HasPrefix(req.path, "/") {
+ req.path = "/" + req.path
+ }
+ signpath = req.path
+
+ err := s3.setBaseURL(req)
+ if err != nil {
+ return err
+ }
+ if req.bucket != "" {
+ signpath = "/" + req.bucket + signpath
+ }
+ }
+
+ // Always sign again as it's not clear how far the
+ // server has handled a previous attempt.
+ u, err := url.Parse(req.baseurl)
+ if err != nil {
+ return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
+ }
+
+ signpathPartiallyEscaped := partiallyEscapedPath(signpath)
+ req.headers["Host"] = []string{u.Host}
+ req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
+ if s3.Auth.Token() != "" {
+ req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
+ }
+ sign(s3.Auth, req.method, signpathPartiallyEscaped, req.params, req.headers)
+ return nil
+}
+
+// setupHttpRequest prepares an *http.Request for doHttpRequest.
+func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) {
+ u, err := req.url()
+ if err != nil {
+ return nil, err
+ }
+ u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path))
+
+ hreq := http.Request{
+ URL: u,
+ Method: req.method,
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Close: true,
+ Header: req.headers,
+ }
+
+ if v, ok := req.headers["Content-Length"]; ok {
+ hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
+ delete(req.headers, "Content-Length")
+ }
+ if req.payload != nil {
+ hreq.Body = ioutil.NopCloser(req.payload)
+ }
+
+ return &hreq, nil
+}
+
+// doHttpRequest sends hreq and returns the http response from the server.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled into it.
+func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) {
+ c := http.Client{
+ Transport: &http.Transport{
+ Dial: func(netw, addr string) (c net.Conn, err error) {
+ deadline := time.Now().Add(s3.ReadTimeout)
+ if s3.ConnectTimeout > 0 {
+ c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
+ } else {
+ c, err = net.Dial(netw, addr)
+ }
+ if err != nil {
+ return
+ }
+ if s3.ReadTimeout > 0 {
+ err = c.SetDeadline(deadline)
+ }
+ return
+ },
+ Proxy: http.ProxyFromEnvironment,
+ },
+ }
+
+ hresp, err := c.Do(hreq)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ dump, _ := httputil.DumpResponse(hresp, true)
+ log.Printf("} -> %s\n", dump)
+ }
+ if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
+ return nil, buildError(hresp)
+ }
+ if resp != nil {
+ err = xml.NewDecoder(hresp.Body).Decode(resp)
+ hresp.Body.Close()
+
+ if debug {
+ log.Printf("goamz.s3> decoded xml into %#v", resp)
+ }
+
+ }
+ return hresp, err
+}
+
+// run sends req and returns the http response from the server.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled into it.
+func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
+ if debug {
+ log.Printf("Running S3 request: %#v", req)
+ }
+
+ hreq, err := s3.setupHttpRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ return s3.doHttpRequest(hreq, resp)
+}
+
+// Error represents an error in an operation with S3.
+type Error struct {
+ StatusCode int // HTTP status code (200, 403, ...)
+ Code string // S3 error code ("NoSuchBucket", ...)
+ Message string // The human-oriented error message
+ BucketName string
+ RequestId string
+ HostId string
+}
+
+func (e *Error) Error() string {
+ return e.Message
+}
+
+func buildError(r *http.Response) error {
+ if debug {
+ log.Printf("got error (status code %v)", r.StatusCode)
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("\tread error: %v", err)
+ } else {
+ log.Printf("\tdata:\n%s\n\n", data)
+ }
+ r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+ }
+
+ err := Error{}
+ // TODO return error if Unmarshal fails?
+ xml.NewDecoder(r.Body).Decode(&err)
+ r.Body.Close()
+ err.StatusCode = r.StatusCode
+ if err.Message == "" {
+ err.Message = r.Status
+ }
+ if debug {
+ log.Printf("err: %#v\n", err)
+ }
+ return &err
+}
+
+func shouldRetry(err error) bool {
+ if err == nil {
+ return false
+ }
+ switch err {
+ case io.ErrUnexpectedEOF, io.EOF:
+ return true
+ }
+ switch e := err.(type) {
+ case *net.DNSError:
+ return true
+ case *net.OpError:
+ switch e.Op {
+ case "read", "write":
+ return true
+ }
+ case *Error:
+ switch e.Code {
+ case "InternalError", "NoSuchUpload", "NoSuchBucket":
+ return true
+ }
+ }
+ return false
+}
+
+func hasCode(err error, code string) bool {
+ s3err, ok := err.(*Error)
+ return ok && s3err.Code == code
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3_test.go
new file mode 100644
index 00000000..4f474da4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3_test.go
@@ -0,0 +1,486 @@
+package s3_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/crowdmob/goamz/aws"
+ "github.com/crowdmob/goamz/s3"
+ "github.com/crowdmob/goamz/testutil"
+ "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+type S struct {
+ s3 *s3.S3
+}
+
+var _ = check.Suite(&S{})
+
+var testServer = testutil.NewHTTPServer()
+
+func (s *S) SetUpSuite(c *check.C) {
+ testServer.Start()
+ auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
+ s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
+}
+
+func (s *S) TearDownSuite(c *check.C) {
+ s3.SetAttemptStrategy(nil)
+}
+
+func (s *S) SetUpTest(c *check.C) {
+ attempts := aws.AttemptStrategy{
+ Total: 300 * time.Millisecond,
+ Delay: 100 * time.Millisecond,
+ }
+ s3.SetAttemptStrategy(&attempts)
+}
+
+func (s *S) TearDownTest(c *check.C) {
+ testServer.Flush()
+}
+
+func (s *S) DisableRetries() {
+ s3.SetAttemptStrategy(&aws.AttemptStrategy{})
+}
+
+// PutBucket docs: http://goo.gl/kBTCu
+
+func (s *S) TestPutBucket(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+}
+
+// PutBucketWebsite docs: http://goo.gl/TpRlUy
+
+func (s *S) TestPutBucketWebsite(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ config := s3.WebsiteConfiguration{
+ RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{HostName: "example.com"},
+ }
+ err := b.PutBucketWebsite(config)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ body, err := ioutil.ReadAll(req.Body)
+ req.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Assert(string(body), check.Equals, BucketWebsiteConfigurationDump)
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ c.Assert(req.URL.RawQuery, check.Equals, "website=")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+}
+
+// Head docs: http://bit.ly/17K1ylI
+
+func (s *S) TestHead(c *check.C) {
+ testServer.Response(200, nil, "content")
+
+ b := s.s3.Bucket("bucket")
+ resp, err := b.Head("name", nil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "HEAD")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(resp.ContentLength, check.FitsTypeOf, int64(0))
+ c.Assert(resp, check.FitsTypeOf, &http.Response{})
+}
+
+// DeleteBucket docs: http://goo.gl/GoBrY
+
+func (s *S) TestDelBucket(c *check.C) {
+ testServer.Response(204, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ err := b.DelBucket()
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "DELETE")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+}
+
+// GetObject docs: http://goo.gl/isCO7
+
+func (s *S) TestGet(c *check.C) {
+ testServer.Response(200, nil, "content")
+
+ b := s.s3.Bucket("bucket")
+ data, err := b.Get("name")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "content")
+}
+
+func (s *S) TestGetWithPlus(c *check.C) {
+ testServer.Response(200, nil, "content")
+
+ b := s.s3.Bucket("bucket")
+ _, err := b.Get("has+plus")
+
+ req := testServer.WaitRequest()
+ c.Assert(err, check.IsNil)
+ c.Assert(req.RequestURI, check.Equals, "http://localhost:4444/bucket/has%2Bplus")
+}
+
+func (s *S) TestURL(c *check.C) {
+ testServer.Response(200, nil, "content")
+
+ b := s.s3.Bucket("bucket")
+ url := b.URL("name")
+ r, err := http.Get(url)
+ c.Assert(err, check.IsNil)
+ data, err := ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "content")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+}
+
+func (s *S) TestGetReader(c *check.C) {
+ testServer.Response(200, nil, "content")
+
+ b := s.s3.Bucket("bucket")
+ rc, err := b.GetReader("name")
+ c.Assert(err, check.IsNil)
+ data, err := ioutil.ReadAll(rc)
+ rc.Close()
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "content")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+}
+
+func (s *S) TestGetNotFound(c *check.C) {
+ for i := 0; i < 10; i++ {
+ testServer.Response(404, nil, GetObjectErrorDump)
+ }
+
+ b := s.s3.Bucket("non-existent-bucket")
+ data, err := b.Get("non-existent")
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/non-existent-bucket/non-existent")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+
+ s3err, _ := err.(*s3.Error)
+ c.Assert(s3err, check.NotNil)
+ c.Assert(s3err.StatusCode, check.Equals, 404)
+ c.Assert(s3err.BucketName, check.Equals, "non-existent-bucket")
+ c.Assert(s3err.RequestId, check.Equals, "3F1B667FAD71C3D8")
+ c.Assert(s3err.HostId, check.Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
+ c.Assert(s3err.Code, check.Equals, "NoSuchBucket")
+ c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist")
+ c.Assert(s3err.Error(), check.Equals, "The specified bucket does not exist")
+ c.Assert(data, check.IsNil)
+}
+
+// PutObject docs: http://goo.gl/FEBPD
+
+func (s *S) TestPutObject(c *check.C) {
+ testServer.Response(200, nil, "")
+ const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\""
+
+ b := s.s3.Bucket("bucket")
+ err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{ContentDisposition: DISPOSITION})
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
+ c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
+ c.Assert(req.Header["Content-Disposition"], check.DeepEquals, []string{DISPOSITION})
+ //c.Assert(req.Header["Content-MD5"], gocheck.DeepEquals, "...")
+ c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
+}
+
+// PutCopy docs: http://goo.gl/mhEHtA
+func (s *S) TestPutCopy(c *check.C) {
+ testServer.Response(200, nil, PutCopyResultDump)
+
+ b := s.s3.Bucket("bucket")
+ res, err := b.PutCopy("name", s3.Private, s3.CopyOptions{},
+ // 0xFC is ü - 0xE9 is é
+ "source-bucket/\u00FCber-fil\u00E9.jpg")
+ c.Assert(err, check.IsNil)
+ c.Assert(res, check.DeepEquals, &s3.CopyObjectResult{
+ ETag: `"9b2cf535f27731c974343645a3985328"`,
+ LastModified: `2009-10-28T22:32:00`})
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"0"})
+ c.Assert(req.Header["X-Amz-Copy-Source"], check.DeepEquals, []string{`source-bucket%2F%C3%BCber-fil%C3%A9.jpg`})
+ c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
+}
+
+func (s *S) TestPutObjectReadTimeout(c *check.C) {
+ s.s3.ReadTimeout = 50 * time.Millisecond
+ defer func() {
+ s.s3.ReadTimeout = 0
+ }()
+
+ b := s.s3.Bucket("bucket")
+ err := b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
+
+ // Make sure that we get a timeout error.
+ c.Assert(err, check.NotNil)
+
+ // Set the response after the request times out so that the next request will work.
+ testServer.Response(200, nil, "")
+
+ // This time set the response within our timeout period so that we expect the call
+ // to return successfully.
+ go func() {
+ time.Sleep(25 * time.Millisecond)
+ testServer.Response(200, nil, "")
+ }()
+ err = b.Put("name", []byte("content"), "content-type", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+}
+
+func (s *S) TestPutReader(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ buf := bytes.NewBufferString("content")
+ err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "PUT")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.DeepEquals), []string{""})
+ c.Assert(req.Header["Content-Type"], check.DeepEquals, []string{"content-type"})
+ c.Assert(req.Header["Content-Length"], check.DeepEquals, []string{"7"})
+ //c.Assert(req.Header["Content-MD5"], gocheck.Equals, "...")
+ c.Assert(req.Header["X-Amz-Acl"], check.DeepEquals, []string{"private"})
+}
+
+// DelObject docs: http://goo.gl/APeTt
+
+func (s *S) TestDelObject(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ err := b.Del("name")
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "DELETE")
+ c.Assert(req.URL.Path, check.Equals, "/bucket/name")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+}
+
+func (s *S) TestDelMultiObjects(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ objects := []s3.Object{{Key: "test"}}
+ err := b.DelMulti(s3.Delete{
+ Quiet: false,
+ Objects: objects,
+ })
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "POST")
+ c.Assert(req.URL.RawQuery, check.Equals, "delete=")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+ c.Assert(req.Header["Content-MD5"], check.Not(check.Equals), "")
+ c.Assert(req.Header["Content-Type"], check.Not(check.Equals), "")
+ c.Assert(req.ContentLength, check.Not(check.Equals), "")
+}
+
+// Bucket List Objects docs: http://goo.gl/YjQTc
+
+func (s *S) TestList(c *check.C) {
+ testServer.Response(200, nil, GetListResultDump1)
+
+ b := s.s3.Bucket("quotes")
+
+ data, err := b.List("N", "", "", 0)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/quotes/")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+ c.Assert(req.Form["prefix"], check.DeepEquals, []string{"N"})
+ c.Assert(req.Form["delimiter"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["marker"], check.DeepEquals, []string{""})
+ c.Assert(req.Form["max-keys"], check.DeepEquals, []string(nil))
+
+ c.Assert(data.Name, check.Equals, "quotes")
+ c.Assert(data.Prefix, check.Equals, "N")
+ c.Assert(data.IsTruncated, check.Equals, false)
+ c.Assert(len(data.Contents), check.Equals, 2)
+
+ c.Assert(data.Contents[0].Key, check.Equals, "Nelson")
+ c.Assert(data.Contents[0].LastModified, check.Equals, "2006-01-01T12:00:00.000Z")
+ c.Assert(data.Contents[0].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
+ c.Assert(data.Contents[0].Size, check.Equals, int64(5))
+ c.Assert(data.Contents[0].StorageClass, check.Equals, "STANDARD")
+ c.Assert(data.Contents[0].Owner.ID, check.Equals, "bcaf161ca5fb16fd081034f")
+ c.Assert(data.Contents[0].Owner.DisplayName, check.Equals, "webfile")
+
+ c.Assert(data.Contents[1].Key, check.Equals, "Neo")
+ c.Assert(data.Contents[1].LastModified, check.Equals, "2006-01-01T12:00:00.000Z")
+ c.Assert(data.Contents[1].ETag, check.Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
+ c.Assert(data.Contents[1].Size, check.Equals, int64(4))
+ c.Assert(data.Contents[1].StorageClass, check.Equals, "STANDARD")
+ c.Assert(data.Contents[1].Owner.ID, check.Equals, "bcaf1ffd86a5fb16fd081034f")
+ c.Assert(data.Contents[1].Owner.DisplayName, check.Equals, "webfile")
+}
+
+func (s *S) TestListWithDelimiter(c *check.C) {
+ testServer.Response(200, nil, GetListResultDump2)
+
+ b := s.s3.Bucket("quotes")
+
+ data, err := b.List("photos/2006/", "/", "some-marker", 1000)
+ c.Assert(err, check.IsNil)
+
+ req := testServer.WaitRequest()
+ c.Assert(req.Method, check.Equals, "GET")
+ c.Assert(req.URL.Path, check.Equals, "/quotes/")
+ c.Assert(req.Header["Date"], check.Not(check.Equals), "")
+ c.Assert(req.Form["prefix"], check.DeepEquals, []string{"photos/2006/"})
+ c.Assert(req.Form["delimiter"], check.DeepEquals, []string{"/"})
+ c.Assert(req.Form["marker"], check.DeepEquals, []string{"some-marker"})
+ c.Assert(req.Form["max-keys"], check.DeepEquals, []string{"1000"})
+
+ c.Assert(data.Name, check.Equals, "example-bucket")
+ c.Assert(data.Prefix, check.Equals, "photos/2006/")
+ c.Assert(data.Delimiter, check.Equals, "/")
+ c.Assert(data.Marker, check.Equals, "some-marker")
+ c.Assert(data.IsTruncated, check.Equals, false)
+ c.Assert(len(data.Contents), check.Equals, 0)
+ c.Assert(data.CommonPrefixes, check.DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
+}
+
+func (s *S) TestExists(c *check.C) {
+ testServer.Response(200, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ result, err := b.Exists("name")
+
+ req := testServer.WaitRequest()
+
+ c.Assert(req.Method, check.Equals, "HEAD")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(result, check.Equals, true)
+}
+
+func (s *S) TestExistsNotFound404(c *check.C) {
+ testServer.Response(404, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ result, err := b.Exists("name")
+
+ req := testServer.WaitRequest()
+
+ c.Assert(req.Method, check.Equals, "HEAD")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(result, check.Equals, false)
+}
+
+func (s *S) TestExistsNotFound403(c *check.C) {
+ testServer.Response(403, nil, "")
+
+ b := s.s3.Bucket("bucket")
+ result, err := b.Exists("name")
+
+ req := testServer.WaitRequest()
+
+ c.Assert(req.Method, check.Equals, "HEAD")
+
+ c.Assert(err, check.IsNil)
+ c.Assert(result, check.Equals, false)
+}
+
+func (s *S) TestGetService(c *check.C) {
+ testServer.Response(200, nil, GetServiceDump)
+
+ expected := s3.GetServiceResp{
+ Owner: s3.Owner{
+ ID: "bcaf1ffd86f461ca5fb16fd081034f",
+ DisplayName: "webfile",
+ },
+ Buckets: []s3.BucketInfo{
+ {
+ Name: "quotes",
+ CreationDate: "2006-02-03T16:45:09.000Z",
+ },
+ {
+ Name: "samples",
+ CreationDate: "2006-02-03T16:41:58.000Z",
+ },
+ },
+ }
+
+ received, err := s.s3.GetService()
+
+ c.Assert(err, check.IsNil)
+ c.Assert(*received, check.DeepEquals, expected)
+}
+
+func (s *S) TestLocation(c *check.C) {
+ testServer.Response(200, nil, GetLocationUsStandard)
+ expectedUsStandard := "us-east-1"
+
+ bucketUsStandard := s.s3.Bucket("us-east-1")
+ resultUsStandard, err := bucketUsStandard.Location()
+
+ c.Assert(err, check.IsNil)
+ c.Assert(resultUsStandard, check.Equals, expectedUsStandard)
+
+ testServer.Response(200, nil, GetLocationUsWest1)
+ expectedUsWest1 := "us-west-1"
+
+ bucketUsWest1 := s.s3.Bucket("us-west-1")
+ resultUsWest1, err := bucketUsWest1.Location()
+
+ c.Assert(err, check.IsNil)
+ c.Assert(resultUsWest1, check.Equals, expectedUsWest1)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3i_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3i_test.go
new file mode 100644
index 00000000..a8979e02
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3i_test.go
@@ -0,0 +1,589 @@
+package s3_test
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "github.com/crowdmob/goamz/aws"
+ "github.com/crowdmob/goamz/s3"
+ "github.com/crowdmob/goamz/testutil"
+ "gopkg.in/check.v1"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+ "time"
+)
+
+// AmazonServer represents an Amazon S3 server.
+type AmazonServer struct {
+ auth aws.Auth
+}
+
+func (s *AmazonServer) SetUp(c *check.C) {
+ auth, err := aws.EnvAuth()
+ if err != nil {
+ c.Fatal(err.Error())
+ }
+ s.auth = auth
+}
+
+var _ = check.Suite(&AmazonClientSuite{Region: aws.USEast})
+var _ = check.Suite(&AmazonClientSuite{Region: aws.EUWest})
+var _ = check.Suite(&AmazonDomainClientSuite{Region: aws.USEast})
+
+// AmazonClientSuite tests the client against a live S3 server.
+type AmazonClientSuite struct {
+ aws.Region
+ srv AmazonServer
+ ClientTests
+}
+
+func (s *AmazonClientSuite) SetUpSuite(c *check.C) {
+ if !testutil.Amazon {
+ c.Skip("live tests against AWS disabled (no -amazon)")
+ }
+ s.srv.SetUp(c)
+ s.s3 = s3.New(s.srv.auth, s.Region)
+ // In case tests were interrupted in the middle before.
+ s.ClientTests.Cleanup()
+}
+
+func (s *AmazonClientSuite) TearDownTest(c *check.C) {
+ s.ClientTests.Cleanup()
+}
+
+// AmazonDomainClientSuite tests the client against a live S3
+// server using bucket names in the endpoint domain name rather
+// than the request path.
+type AmazonDomainClientSuite struct {
+ aws.Region
+ srv AmazonServer
+ ClientTests
+}
+
+func (s *AmazonDomainClientSuite) SetUpSuite(c *check.C) {
+ if !testutil.Amazon {
+ c.Skip("live tests against AWS disabled (no -amazon)")
+ }
+ s.srv.SetUp(c)
+ region := s.Region
+ region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
+ s.s3 = s3.New(s.srv.auth, region)
+ s.ClientTests.Cleanup()
+}
+
+func (s *AmazonDomainClientSuite) TearDownTest(c *check.C) {
+ s.ClientTests.Cleanup()
+}
+
+// ClientTests defines integration tests designed to test the client.
+// It is not used as a test suite in itself, but embedded within
+// another type.
+type ClientTests struct {
+ s3 *s3.S3
+ authIsBroken bool
+}
+
+func (s *ClientTests) Cleanup() {
+ killBucket(testBucket(s.s3))
+}
+
+func testBucket(s *s3.S3) *s3.Bucket {
+ // Watch out! If this function is changed so that it matches a bucket
+ // that real people own, killBucket will happily remove *everything*
+ // inside that bucket.
+ key := s.Auth.AccessKey
+ if len(key) >= 8 {
+ key = s.Auth.AccessKey[:8]
+ }
+ return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
+}
+
+var attempts = aws.AttemptStrategy{
+ Min: 5,
+ Total: 20 * time.Second,
+ Delay: 100 * time.Millisecond,
+}
+
+func killBucket(b *s3.Bucket) {
+ var err error
+ for attempt := attempts.Start(); attempt.Next(); {
+ err = b.DelBucket()
+ if err == nil {
+ return
+ }
+ if _, ok := err.(*net.DNSError); ok {
+ return
+ }
+ e, ok := err.(*s3.Error)
+ if ok && e.Code == "NoSuchBucket" {
+ return
+ }
+ if ok && e.Code == "BucketNotEmpty" {
+ // Errors are ignored here. Just retry.
+ resp, err := b.List("", "", "", 1000)
+ if err == nil {
+ for _, key := range resp.Contents {
+ _ = b.Del(key.Key)
+ }
+ }
+ multis, _, _ := b.ListMulti("", "")
+ for _, m := range multis {
+ _ = m.Abort()
+ }
+ }
+ }
+ message := "cannot delete test bucket"
+ if err != nil {
+ message += ": " + err.Error()
+ }
+ panic(message)
+}
+
+func get(url string) ([]byte, error) {
+ for attempt := attempts.Start(); attempt.Next(); {
+ resp, err := http.Get(url)
+ if err != nil {
+ if attempt.HasNext() {
+ continue
+ }
+ return nil, err
+ }
+ data, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ if attempt.HasNext() {
+ continue
+ }
+ return nil, err
+ }
+ return data, err
+ }
+ panic("unreachable")
+}
+
+func (s *ClientTests) TestBasicFunctionality(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.PublicRead)
+ c.Assert(err, check.IsNil)
+
+ err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead, s3.Options{})
+ c.Assert(err, check.IsNil)
+ defer b.Del("name")
+
+ data, err := b.Get("name")
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "yo!")
+
+ data, err = get(b.URL("name"))
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "yo!")
+
+ buf := bytes.NewBufferString("hey!")
+ err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ defer b.Del("name2")
+
+ rc, err := b.GetReader("name2")
+ c.Assert(err, check.IsNil)
+ data, err = ioutil.ReadAll(rc)
+ c.Check(err, check.IsNil)
+ c.Check(string(data), check.Equals, "hey!")
+ rc.Close()
+
+ data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Equals, "hey!")
+
+ if !s.authIsBroken {
+ data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
+ c.Assert(err, check.IsNil)
+ c.Assert(string(data), check.Matches, "(?s).*AccessDenied.*")
+ }
+
+ err = b.DelBucket()
+ c.Assert(err, check.NotNil)
+
+ s3err, ok := err.(*s3.Error)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(s3err.Code, check.Equals, "BucketNotEmpty")
+ c.Assert(s3err.BucketName, check.Equals, b.Name)
+ c.Assert(s3err.Message, check.Equals, "The bucket you tried to delete is not empty")
+
+ err = b.Del("name")
+ c.Assert(err, check.IsNil)
+ err = b.Del("name2")
+ c.Assert(err, check.IsNil)
+
+ err = b.DelBucket()
+ c.Assert(err, check.IsNil)
+}
+
+func (s *ClientTests) TestGetNotFound(c *check.C) {
+ b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
+ data, err := b.Get("non-existent")
+
+ s3err, _ := err.(*s3.Error)
+ c.Assert(s3err, check.NotNil)
+ c.Assert(s3err.StatusCode, check.Equals, 404)
+ c.Assert(s3err.Code, check.Equals, "NoSuchBucket")
+ c.Assert(s3err.Message, check.Equals, "The specified bucket does not exist")
+ c.Assert(data, check.IsNil)
+}
+
+// Communicate with all endpoints to see if they are alive.
+func (s *ClientTests) TestRegions(c *check.C) {
+ errs := make(chan error, len(aws.Regions))
+ for _, region := range aws.Regions {
+ go func(r aws.Region) {
+ s := s3.New(s.s3.Auth, r)
+ b := s.Bucket("goamz-" + s.Auth.AccessKey)
+ _, err := b.Get("non-existent")
+ errs <- err
+ }(region)
+ }
+ for range aws.Regions {
+ err := <-errs
+ if err != nil {
+ s3err, ok := err.(*s3.Error)
+ if ok {
+ c.Check(s3err.Code, check.Matches, "NoSuchBucket")
+ } else if _, ok = err.(*net.DNSError); ok {
+ // Okay as well.
+ } else {
+ c.Errorf("Non-S3 error: %s", err)
+ }
+ } else {
+ c.Errorf("Test should have errored but it seems to have succeeded")
+ }
+ }
+}
+
+var objectNames = []string{
+ "index.html",
+ "index2.html",
+ "photos/2006/February/sample2.jpg",
+ "photos/2006/February/sample3.jpg",
+ "photos/2006/February/sample4.jpg",
+ "photos/2006/January/sample.jpg",
+ "test/bar",
+ "test/foo",
+}
+
+func keys(names ...string) []s3.Key {
+ ks := make([]s3.Key, len(names))
+ for i, name := range names {
+ ks[i].Key = name
+ }
+ return ks
+}
+
+// As the ListResp specifies all the parameters to the
+// request too, we use it to specify request parameters
+// and expected results. The Contents field is
+// used only for the key names inside it.
+var listTests = []s3.ListResp{
+ // normal list.
+ {
+ Contents: keys(objectNames...),
+ }, {
+ Marker: objectNames[0],
+ Contents: keys(objectNames[1:]...),
+ }, {
+ Marker: objectNames[0] + "a",
+ Contents: keys(objectNames[1:]...),
+ }, {
+ Marker: "z",
+ },
+
+ // limited results.
+ {
+ MaxKeys: 2,
+ Contents: keys(objectNames[0:2]...),
+ IsTruncated: true,
+ }, {
+ MaxKeys: 2,
+ Marker: objectNames[0],
+ Contents: keys(objectNames[1:3]...),
+ IsTruncated: true,
+ }, {
+ MaxKeys: 2,
+ Marker: objectNames[len(objectNames)-2],
+ Contents: keys(objectNames[len(objectNames)-1:]...),
+ },
+
+ // with delimiter
+ {
+ Delimiter: "/",
+ CommonPrefixes: []string{"photos/", "test/"},
+ Contents: keys("index.html", "index2.html"),
+ }, {
+ Delimiter: "/",
+ Prefix: "photos/2006/",
+ CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
+ }, {
+ Delimiter: "/",
+ Prefix: "t",
+ CommonPrefixes: []string{"test/"},
+ }, {
+ Delimiter: "/",
+ MaxKeys: 1,
+ Contents: keys("index.html"),
+ IsTruncated: true,
+ }, {
+ Delimiter: "/",
+ MaxKeys: 1,
+ Marker: "index2.html",
+ CommonPrefixes: []string{"photos/"},
+ IsTruncated: true,
+ }, {
+ Delimiter: "/",
+ MaxKeys: 1,
+ Marker: "photos/",
+ CommonPrefixes: []string{"test/"},
+ IsTruncated: false,
+ }, {
+ Delimiter: "Feb",
+ CommonPrefixes: []string{"photos/2006/Feb"},
+ Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
+ },
+}
+
+func (s *ClientTests) TestDoublePutBucket(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.PublicRead)
+ c.Assert(err, check.IsNil)
+
+ err = b.PutBucket(s3.PublicRead)
+ if err != nil {
+ c.Assert(err, check.FitsTypeOf, new(s3.Error))
+ c.Assert(err.(*s3.Error).Code, check.Equals, "BucketAlreadyOwnedByYou")
+ }
+}
+
+func (s *ClientTests) TestBucketList(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ objData := make(map[string][]byte)
+ for i, path := range objectNames {
+ data := []byte(strings.Repeat("a", i))
+ err := b.Put(path, data, "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ defer b.Del(path)
+ objData[path] = data
+ }
+
+ for i, t := range listTests {
+ c.Logf("test %d", i)
+ resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Name, check.Equals, b.Name)
+ c.Check(resp.Delimiter, check.Equals, t.Delimiter)
+ c.Check(resp.IsTruncated, check.Equals, t.IsTruncated)
+ c.Check(resp.CommonPrefixes, check.DeepEquals, t.CommonPrefixes)
+ checkContents(c, resp.Contents, objData, t.Contents)
+ }
+}
+
+func etag(data []byte) string {
+ sum := md5.New()
+ sum.Write(data)
+ return fmt.Sprintf(`"%x"`, sum.Sum(nil))
+}
+
+func checkContents(c *check.C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
+ c.Assert(contents, check.HasLen, len(expected))
+ for i, k := range contents {
+ c.Check(k.Key, check.Equals, expected[i].Key)
+ // TODO mtime
+ c.Check(k.Size, check.Equals, int64(len(data[k.Key])))
+ c.Check(k.ETag, check.Equals, etag(data[k.Key]))
+ }
+}
+
+func (s *ClientTests) TestMultiInitPutList(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ c.Assert(multi.UploadId, check.Matches, ".+")
+ defer multi.Abort()
+
+ var sent []s3.Part
+
+ for i := 0; i < 5; i++ {
+ p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
+ c.Assert(err, check.IsNil)
+ c.Assert(p.N, check.Equals, i+1)
+ c.Assert(p.Size, check.Equals, int64(8))
+ c.Assert(p.ETag, check.Matches, ".+")
+ sent = append(sent, p)
+ }
+
+ s3.SetListPartsMax(2)
+
+ parts, err := multi.ListParts()
+ c.Assert(err, check.IsNil)
+ c.Assert(parts, check.HasLen, len(sent))
+ for i := range parts {
+ c.Assert(parts[i].N, check.Equals, sent[i].N)
+ c.Assert(parts[i].Size, check.Equals, sent[i].Size)
+ c.Assert(parts[i].ETag, check.Equals, sent[i].ETag)
+ }
+
+ err = multi.Complete(parts)
+ s3err, failed := err.(*s3.Error)
+ c.Assert(failed, check.Equals, true)
+ c.Assert(s3err.Code, check.Equals, "EntityTooSmall")
+
+ err = multi.Abort()
+ c.Assert(err, check.IsNil)
+ _, err = multi.ListParts()
+ s3err, ok := err.(*s3.Error)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(s3err.Code, check.Equals, "NoSuchUpload")
+}
+
+// This may take a minute or more due to the minimum part size S3
+// accepts for multipart uploads.
+func (s *ClientTests) TestMultiComplete(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ c.Assert(multi.UploadId, check.Matches, ".+")
+ defer multi.Abort()
+
+ // Minimum size S3 accepts for all but the last part is 5MB.
+ data1 := make([]byte, 5*1024*1024)
+ data2 := []byte("")
+
+ part1, err := multi.PutPart(1, bytes.NewReader(data1))
+ c.Assert(err, check.IsNil)
+ part2, err := multi.PutPart(2, bytes.NewReader(data2))
+ c.Assert(err, check.IsNil)
+
+ // Purposefully reversed. The order requirement must be handled.
+ err = multi.Complete([]s3.Part{part2, part1})
+ c.Assert(err, check.IsNil)
+
+ data, err := b.Get("multi")
+ c.Assert(err, check.IsNil)
+
+ c.Assert(len(data), check.Equals, len(data1)+len(data2))
+ for i := range data1 {
+ if data[i] != data1[i] {
+ c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i])
+ }
+ }
+ c.Assert(string(data[len(data1):]), check.Equals, string(data2))
+}
+
+type multiList []*s3.Multi
+
+func (l multiList) Len() int { return len(l) }
+func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
+func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+
+func (s *ClientTests) TestListMulti(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ // Ensure an empty state before testing its behavior.
+ multis, _, err := b.ListMulti("", "")
+ c.Assert(err, check.IsNil)
+ for _, m := range multis {
+ err := m.Abort()
+ c.Assert(err, check.IsNil)
+ }
+
+ keys := []string{
+ "a/multi2",
+ "a/multi3",
+ "b/multi4",
+ "multi1",
+ }
+ for _, key := range keys {
+ m, err := b.InitMulti(key, "", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ defer m.Abort()
+ }
+
+ // Amazon's implementation of the multiple-request listing for
+ // multipart uploads in progress seems broken in multiple ways.
+ // (next tokens are not provided, etc).
+ //s3.SetListMultiMax(2)
+
+ multis, prefixes, err := b.ListMulti("", "")
+ c.Assert(err, check.IsNil)
+ for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
+ multis, prefixes, err = b.ListMulti("", "")
+ c.Assert(err, check.IsNil)
+ }
+ sort.Sort(multiList(multis))
+ c.Assert(prefixes, check.IsNil)
+ var gotKeys []string
+ for _, m := range multis {
+ gotKeys = append(gotKeys, m.Key)
+ }
+ c.Assert(gotKeys, check.DeepEquals, keys)
+ for _, m := range multis {
+ c.Assert(m.Bucket, check.Equals, b)
+ c.Assert(m.UploadId, check.Matches, ".+")
+ }
+
+ multis, prefixes, err = b.ListMulti("", "/")
+ for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
+ multis, prefixes, err = b.ListMulti("", "")
+ c.Assert(err, check.IsNil)
+ }
+ c.Assert(err, check.IsNil)
+ c.Assert(prefixes, check.DeepEquals, []string{"a/", "b/"})
+ c.Assert(multis, check.HasLen, 1)
+ c.Assert(multis[0].Bucket, check.Equals, b)
+ c.Assert(multis[0].Key, check.Equals, "multi1")
+ c.Assert(multis[0].UploadId, check.Matches, ".+")
+
+ for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
+ multis, prefixes, err = b.ListMulti("", "")
+ c.Assert(err, check.IsNil)
+ }
+ multis, prefixes, err = b.ListMulti("a/", "/")
+ c.Assert(err, check.IsNil)
+ c.Assert(prefixes, check.IsNil)
+ c.Assert(multis, check.HasLen, 2)
+ c.Assert(multis[0].Bucket, check.Equals, b)
+ c.Assert(multis[0].Key, check.Equals, "a/multi2")
+ c.Assert(multis[0].UploadId, check.Matches, ".+")
+ c.Assert(multis[1].Bucket, check.Equals, b)
+ c.Assert(multis[1].Key, check.Equals, "a/multi3")
+ c.Assert(multis[1].UploadId, check.Matches, ".+")
+}
+
+func (s *ClientTests) TestMultiPutAllZeroLength(c *check.C) {
+ b := testBucket(s.s3)
+ err := b.PutBucket(s3.Private)
+ c.Assert(err, check.IsNil)
+
+ multi, err := b.InitMulti("multi", "text/plain", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ defer multi.Abort()
+
+ // This tests an edge case. Amazon requires at least one
+ // part for multipart uploads to work, even if the part is empty.
+ parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
+ c.Assert(err, check.IsNil)
+ c.Assert(parts, check.HasLen, 1)
+ c.Assert(parts[0].Size, check.Equals, int64(0))
+ c.Assert(parts[0].ETag, check.Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
+
+ err = multi.Complete(parts)
+ c.Assert(err, check.IsNil)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3t_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3t_test.go
new file mode 100644
index 00000000..17148926
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3t_test.go
@@ -0,0 +1,79 @@
+package s3_test
+
+import (
+ "github.com/crowdmob/goamz/aws"
+ "github.com/crowdmob/goamz/s3"
+ "github.com/crowdmob/goamz/s3/s3test"
+ "gopkg.in/check.v1"
+)
+
+type LocalServer struct {
+ auth aws.Auth
+ region aws.Region
+ srv *s3test.Server
+ config *s3test.Config
+}
+
+func (s *LocalServer) SetUp(c *check.C) {
+ srv, err := s3test.NewServer(s.config)
+ c.Assert(err, check.IsNil)
+ c.Assert(srv, check.NotNil)
+
+ s.srv = srv
+ s.region = aws.Region{
+ Name: "faux-region-1",
+ S3Endpoint: srv.URL(),
+ S3LocationConstraint: true, // s3test server requires a LocationConstraint
+ }
+}
+
+// LocalServerSuite defines tests that will run
+// against the local s3test server. It includes
+// selected tests from ClientTests;
+// when the s3test functionality is sufficient, it should
+// include all of them, and ClientTests can be simply embedded.
+type LocalServerSuite struct {
+ srv LocalServer
+ clientTests ClientTests
+}
+
+var (
+ // run tests twice, once in us-east-1 mode, once not.
+ _ = check.Suite(&LocalServerSuite{})
+ _ = check.Suite(&LocalServerSuite{
+ srv: LocalServer{
+ config: &s3test.Config{
+ Send409Conflict: true,
+ },
+ },
+ })
+)
+
+func (s *LocalServerSuite) SetUpSuite(c *check.C) {
+ s.srv.SetUp(c)
+ s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
+
+ // TODO Sadly the fake server ignores auth completely right now. :-(
+ s.clientTests.authIsBroken = true
+ s.clientTests.Cleanup()
+}
+
+func (s *LocalServerSuite) TearDownTest(c *check.C) {
+ s.clientTests.Cleanup()
+}
+
+func (s *LocalServerSuite) TestBasicFunctionality(c *check.C) {
+ s.clientTests.TestBasicFunctionality(c)
+}
+
+func (s *LocalServerSuite) TestGetNotFound(c *check.C) {
+ s.clientTests.TestGetNotFound(c)
+}
+
+func (s *LocalServerSuite) TestBucketList(c *check.C) {
+ s.clientTests.TestBucketList(c)
+}
+
+func (s *LocalServerSuite) TestDoublePutBucket(c *check.C) {
+ s.clientTests.TestDoublePutBucket(c)
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3test/server.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3test/server.go
new file mode 100644
index 00000000..42bddcfc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3test/server.go
@@ -0,0 +1,629 @@
+package s3test
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "github.com/crowdmob/goamz/s3"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const debug = false
+
+type s3Error struct {
+ statusCode int
+ XMLName struct{} `xml:"Error"`
+ Code string
+ Message string
+ BucketName string
+ RequestId string
+ HostId string
+}
+
+type action struct {
+ srv *Server
+ w http.ResponseWriter
+ req *http.Request
+ reqId string
+}
+
+// Config controls the internal behaviour of the Server. A nil config is
+// valid and means that all options take their default values. Once passed
+// to NewServer, the configuration must not be modified.
+type Config struct {
+ // Send409Conflict controls how the Server will respond to calls to PUT on a
+ // previously existing bucket. The default is false, and corresponds to the
+ // us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
+ // all other regions.
+ // http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
+ Send409Conflict bool
+}
+
+func (c *Config) send409Conflict() bool {
+ if c != nil {
+ return c.Send409Conflict
+ }
+ return false
+}
+
+// Server is a fake S3 server for testing purposes.
+// All of the data for the server is kept in memory.
+type Server struct {
+ url string
+ reqId int
+ listener net.Listener
+ mu sync.Mutex
+ buckets map[string]*bucket
+ config *Config
+}
+
+type bucket struct {
+ name string
+ acl s3.ACL
+ ctime time.Time
+ objects map[string]*object
+}
+
+type object struct {
+ name string
+ mtime time.Time
+ meta http.Header // metadata to return with requests.
+ checksum []byte // also held as Content-MD5 in meta.
+ data []byte
+}
+
+// A resource encapsulates the subject of an HTTP request.
+// The resource referred to may or may not exist
+// when the request is made.
+type resource interface {
+ put(a *action) interface{}
+ get(a *action) interface{}
+ post(a *action) interface{}
+ delete(a *action) interface{}
+}
+
+func NewServer(config *Config) (*Server, error) {
+ l, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ return nil, fmt.Errorf("cannot listen on localhost: %v", err)
+ }
+ srv := &Server{
+ listener: l,
+ url: "http://" + l.Addr().String(),
+ buckets: make(map[string]*bucket),
+ config: config,
+ }
+ go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ srv.serveHTTP(w, req)
+ }))
+ return srv, nil
+}
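+
+// A minimal usage sketch (illustrative): starting the fake server and
+// pointing a goamz client at it; the region name is a placeholder.
+//
+//	srv, err := s3test.NewServer(nil)
+//	// handle err, then:
+//	region := aws.Region{Name: "faux-region-1", S3Endpoint: srv.URL(), S3LocationConstraint: true}
+//	client := s3.New(aws.Auth{}, region)
+//	defer srv.Quit()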
+
+// Quit closes down the server.
+func (srv *Server) Quit() {
+ srv.listener.Close()
+}
+
+// URL returns a URL for the server.
+func (srv *Server) URL() string {
+ return srv.url
+}
+
+func fatalf(code int, codeStr string, errf string, a ...interface{}) {
+ panic(&s3Error{
+ statusCode: code,
+ Code: codeStr,
+ Message: fmt.Sprintf(errf, a...),
+ })
+}
+
+// serveHTTP serves the S3 protocol.
+func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
+ // ignore error from ParseForm as it's usually spurious.
+ req.ParseForm()
+
+ srv.mu.Lock()
+ defer srv.mu.Unlock()
+
+ if debug {
+ log.Printf("s3test %q %q", req.Method, req.URL)
+ }
+ a := &action{
+ srv: srv,
+ w: w,
+ req: req,
+ reqId: fmt.Sprintf("%09X", srv.reqId),
+ }
+ srv.reqId++
+
+ var r resource
+ defer func() {
+ switch err := recover().(type) {
+ case *s3Error:
+ switch r := r.(type) {
+ case objectResource:
+ err.BucketName = r.bucket.name
+ case bucketResource:
+ err.BucketName = r.name
+ }
+ err.RequestId = a.reqId
+ // TODO HostId
+ w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
+ w.WriteHeader(err.statusCode)
+ xmlMarshal(w, err)
+ case nil:
+ default:
+ panic(err)
+ }
+ }()
+
+ r = srv.resourceForURL(req.URL)
+
+ var resp interface{}
+ switch req.Method {
+ case "PUT":
+ resp = r.put(a)
+ case "GET", "HEAD":
+ resp = r.get(a)
+ case "DELETE":
+ resp = r.delete(a)
+ case "POST":
+ resp = r.post(a)
+ default:
+ fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
+ }
+ if resp != nil && req.Method != "HEAD" {
+ xmlMarshal(w, resp)
+ }
+}
+
+// xmlMarshal is the same as xml.Marshal except that
+// it panics on error. The marshalling should not fail,
+// but we want to know if it does.
+func xmlMarshal(w io.Writer, x interface{}) {
+ if err := xml.NewEncoder(w).Encode(x); err != nil {
+ panic(fmt.Errorf("error marshalling %#v: %v", x, err))
+ }
+}
+
+// In a fully implemented test server, each of these would have
+// its own resource type.
+var unimplementedBucketResourceNames = map[string]bool{
+ "acl": true,
+ "lifecycle": true,
+ "policy": true,
+ "location": true,
+ "logging": true,
+ "notification": true,
+ "versions": true,
+ "requestPayment": true,
+ "versioning": true,
+ "website": true,
+ "uploads": true,
+}
+
+var unimplementedObjectResourceNames = map[string]bool{
+ "uploadId": true,
+ "acl": true,
+ "torrent": true,
+ "uploads": true,
+}
+
+var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
+
+// resourceForURL returns a resource object for the given URL.
+func (srv *Server) resourceForURL(u *url.URL) (r resource) {
+ m := pathRegexp.FindStringSubmatch(u.Path)
+ if m == nil {
+ fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
+ }
+ bucketName := m[2]
+ objectName := m[4]
+ if bucketName == "" {
+ return nullResource{} // root
+ }
+ b := bucketResource{
+ name: bucketName,
+ bucket: srv.buckets[bucketName],
+ }
+ q := u.Query()
+ if objectName == "" {
+ for name := range q {
+ if unimplementedBucketResourceNames[name] {
+ return nullResource{}
+ }
+ }
+ return b
+ }
+ if b.bucket == nil {
+ fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+ }
+ objr := objectResource{
+ name: objectName,
+ version: q.Get("versionId"),
+ bucket: b.bucket,
+ }
+ for name := range q {
+ if unimplementedObjectResourceNames[name] {
+ return nullResource{}
+ }
+ }
+ if obj := objr.bucket.objects[objr.name]; obj != nil {
+ objr.object = obj
+ }
+ return objr
+}
+
+// nullResource has error stubs for all resource methods.
+type nullResource struct{}
+
+func notAllowed() interface{} {
+ fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+ return nil
+}
+
+func (nullResource) put(a *action) interface{} { return notAllowed() }
+func (nullResource) get(a *action) interface{} { return notAllowed() }
+func (nullResource) post(a *action) interface{} { return notAllowed() }
+func (nullResource) delete(a *action) interface{} { return notAllowed() }
+
+const timeFormat = "2006-01-02T15:04:05.000Z07:00"
+
+type bucketResource struct {
+ name string
+ bucket *bucket // non-nil if the bucket already exists.
+}
+
+// GET on a bucket lists the objects in the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
+func (r bucketResource) get(a *action) interface{} {
+ if r.bucket == nil {
+ fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+ }
+ delimiter := a.req.Form.Get("delimiter")
+ marker := a.req.Form.Get("marker")
+ maxKeys := -1
+ if s := a.req.Form.Get("max-keys"); s != "" {
+ i, err := strconv.Atoi(s)
+ if err != nil || i < 0 {
+ fatalf(400, "invalid value for max-keys: %q", s)
+ }
+ maxKeys = i
+ }
+ prefix := a.req.Form.Get("prefix")
+ a.w.Header().Set("Content-Type", "application/xml")
+
+ if a.req.Method == "HEAD" {
+ return nil
+ }
+
+ var objs orderedObjects
+
+ // first get all matching objects and arrange them in alphabetical order.
+ for name, obj := range r.bucket.objects {
+ if strings.HasPrefix(name, prefix) {
+ objs = append(objs, obj)
+ }
+ }
+ sort.Sort(objs)
+
+ if maxKeys <= 0 {
+ maxKeys = 1000
+ }
+ resp := &s3.ListResp{
+ Name: r.bucket.name,
+ Prefix: prefix,
+ Delimiter: delimiter,
+ Marker: marker,
+ MaxKeys: maxKeys,
+ }
+
+ var prefixes []string
+ for _, obj := range objs {
+ if !strings.HasPrefix(obj.name, prefix) {
+ continue
+ }
+ name := obj.name
+ isPrefix := false
+ if delimiter != "" {
+ if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
+ name = obj.name[:len(prefix)+i+len(delimiter)]
+ if prefixes != nil && prefixes[len(prefixes)-1] == name {
+ continue
+ }
+ isPrefix = true
+ }
+ }
+ if name <= marker {
+ continue
+ }
+ if len(resp.Contents)+len(prefixes) >= maxKeys {
+ resp.IsTruncated = true
+ break
+ }
+ if isPrefix {
+ prefixes = append(prefixes, name)
+ } else {
+ // Contents contains only keys not found in CommonPrefixes
+ resp.Contents = append(resp.Contents, obj.s3Key())
+ }
+ }
+ resp.CommonPrefixes = prefixes
+ return resp
+}
+
+// orderedObjects holds a slice of objects that can be sorted
+// by name.
+type orderedObjects []*object
+
+func (s orderedObjects) Len() int {
+ return len(s)
+}
+func (s orderedObjects) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s orderedObjects) Less(i, j int) bool {
+ return s[i].name < s[j].name
+}
+
+func (obj *object) s3Key() s3.Key {
+ return s3.Key{
+ Key: obj.name,
+ LastModified: obj.mtime.Format(timeFormat),
+ Size: int64(len(obj.data)),
+ ETag: fmt.Sprintf(`"%x"`, obj.checksum),
+ // TODO StorageClass
+ // TODO Owner
+ }
+}
+
+// DELETE on a bucket deletes the bucket if it's not empty.
+func (r bucketResource) delete(a *action) interface{} {
+ b := r.bucket
+ if b == nil {
+ fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+ }
+ if len(b.objects) > 0 {
+ fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
+ }
+ delete(a.srv.buckets, b.name)
+ return nil
+}
+
+// PUT on a bucket creates the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
+func (r bucketResource) put(a *action) interface{} {
+ var created bool
+ if r.bucket == nil {
+ if !validBucketName(r.name) {
+ fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
+ }
+ if loc := locationConstraint(a); loc == "" {
+ fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
+ }
+ // TODO validate acl
+ r.bucket = &bucket{
+ name: r.name,
+ // TODO default acl
+ objects: make(map[string]*object),
+ }
+ a.srv.buckets[r.name] = r.bucket
+ created = true
+ }
+ if !created && a.srv.config.send409Conflict() {
+ fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
+ }
+ r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
+ return nil
+}
+
+func (bucketResource) post(a *action) interface{} {
+ fatalf(400, "Method", "bucket POST method not available")
+ return nil
+}
+
+// validBucketName returns whether name is a valid bucket name.
+// Here are the rules, from:
+// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
+//
+// Can contain lowercase letters, numbers, periods (.), underscores (_),
+// and dashes (-). You can use uppercase letters for buckets only in the
+// US Standard region.
+//
+// Must start with a number or letter
+//
+// Must be between 3 and 255 characters long
+//
+// There's one extra rule (must not be formatted as an IP address,
+// e.g. 192.168.5.4), but the real S3 server does not seem to check that
+// rule, so we will not check it either.
+func validBucketName(name string) bool {
+ if len(name) < 3 || len(name) > 255 {
+ return false
+ }
+ r := name[0]
+ if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
+ return false
+ }
+ for _, r := range name {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'a' && r <= 'z':
+ case r == '_' || r == '-':
+ case r == '.':
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+var responseParams = map[string]bool{
+ "content-type": true,
+ "content-language": true,
+ "expires": true,
+ "cache-control": true,
+ "content-disposition": true,
+ "content-encoding": true,
+}
+
+type objectResource struct {
+ name string
+ version string
+ bucket *bucket // always non-nil.
+ object *object // may be nil.
+}
+
+// GET on an object gets the contents of the object.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
+func (objr objectResource) get(a *action) interface{} {
+ obj := objr.object
+ if obj == nil {
+ fatalf(404, "NoSuchKey", "The specified key does not exist.")
+ }
+ h := a.w.Header()
+ // add metadata
+ for name, d := range obj.meta {
+ h[name] = d
+ }
+ // override header values in response to request parameters.
+ for name, vals := range a.req.Form {
+ if strings.HasPrefix(name, "response-") {
+ name = name[len("response-"):]
+ if !responseParams[name] {
+ continue
+ }
+ h.Set(name, vals[0])
+ }
+ }
+ if r := a.req.Header.Get("Range"); r != "" {
+ fatalf(400, "NotImplemented", "range unimplemented")
+ }
+ // TODO Last-Modified-Since
+ // TODO If-Modified-Since
+ // TODO If-Unmodified-Since
+ // TODO If-Match
+ // TODO If-None-Match
+ // TODO Connection: close ??
+ // TODO x-amz-request-id
+ h.Set("Content-Length", fmt.Sprint(len(obj.data)))
+ h.Set("ETag", hex.EncodeToString(obj.checksum))
+ h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
+ if a.req.Method == "HEAD" {
+ return nil
+ }
+ // TODO avoid holding the lock when writing data.
+ _, err := a.w.Write(obj.data)
+ if err != nil {
+ // we can't do much except just log the fact.
+ log.Printf("error writing data: %v", err)
+ }
+ return nil
+}
+
+var metaHeaders = map[string]bool{
+ "Content-MD5": true,
+ "x-amz-acl": true,
+ "Content-Type": true,
+ "Content-Encoding": true,
+ "Content-Disposition": true,
+}
+
+// PUT on an object creates the object.
+func (objr objectResource) put(a *action) interface{} {
+ // TODO Cache-Control header
+ // TODO Expires header
+ // TODO x-amz-server-side-encryption
+ // TODO x-amz-storage-class
+
+ // TODO is this correct, or should we erase all previous metadata?
+ obj := objr.object
+ if obj == nil {
+ obj = &object{
+ name: objr.name,
+ meta: make(http.Header),
+ }
+ }
+
+ var expectHash []byte
+ if c := a.req.Header.Get("Content-MD5"); c != "" {
+ var err error
+ expectHash, err = base64.StdEncoding.DecodeString(c)
+ if err != nil || len(expectHash) != md5.Size {
+ fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
+ }
+ }
+ sum := md5.New()
+ // TODO avoid holding lock while reading data.
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
+ if err != nil {
+ fatalf(400, "TODO", "read error")
+ }
+ gotHash := sum.Sum(nil)
+ if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 {
+ fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
+ }
+ if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
+ fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
+ }
+
+ // PUT request has been successful - save data and metadata
+ for key, values := range a.req.Header {
+ key = http.CanonicalHeaderKey(key)
+ if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
+ obj.meta[key] = values
+ }
+ }
+ obj.data = data
+ obj.checksum = gotHash
+ obj.mtime = time.Now()
+ objr.bucket.objects[objr.name] = obj
+ return nil
+}
+
+func (objr objectResource) delete(a *action) interface{} {
+ delete(objr.bucket.objects, objr.name)
+ return nil
+}
+
+func (objr objectResource) post(a *action) interface{} {
+ fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+ return nil
+}
+
+type CreateBucketConfiguration struct {
+ LocationConstraint string
+}
+
+// locationConstraint parses the request body (if present).
+// If there is no body, an empty string will be returned.
+func locationConstraint(a *action) string {
+ var body bytes.Buffer
+ if _, err := io.Copy(&body, a.req.Body); err != nil {
+ fatalf(400, "InvalidRequest", err.Error())
+ }
+ if body.Len() == 0 {
+ return ""
+ }
+ var loc CreateBucketConfiguration
+ if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
+ fatalf(400, "InvalidRequest", err.Error())
+ }
+ return loc.LocationConstraint
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign.go
new file mode 100644
index 00000000..1c33f274
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign.go
@@ -0,0 +1,120 @@
+package s3
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "github.com/crowdmob/goamz/aws"
+ "log"
+ "sort"
+ "strings"
+)
+
+var b64 = base64.StdEncoding
+
+// ----------------------------------------------------------------------------
+// S3 signing (http://goo.gl/G1LrK)
+
+var s3ParamsToSign = map[string]bool{
+ "acl": true,
+ "location": true,
+ "logging": true,
+ "notification": true,
+ "partNumber": true,
+ "policy": true,
+ "requestPayment": true,
+ "torrent": true,
+ "uploadId": true,
+ "uploads": true,
+ "versionId": true,
+ "versioning": true,
+ "versions": true,
+ "response-content-type": true,
+ "response-content-language": true,
+ "response-expires": true,
+ "response-cache-control": true,
+ "response-content-disposition": true,
+ "response-content-encoding": true,
+ "website": true,
+ "delete": true,
+}
+
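+// sign computes an AWS signature version 2 for the request, per
+// http://goo.gl/G1LrK. The string to sign is assembled from the HTTP
+// method, Content-MD5, Content-Type, Date (or x-amz-date, or the Expires
+// value for pre-signed requests), the sorted x-amz-* headers, and the
+// signed sub-resource parameters. It is then HMAC-SHA1 signed with the
+// secret key and base64 encoded, and attached either as the Authorization
+// header or, for pre-signed requests, as the Signature query parameter.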
+func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
+ var md5, ctype, date, xamz string
+ var xamzDate bool
+ var keys, sarray []string
+ xheaders := make(map[string]string)
+ for k, v := range headers {
+ k = strings.ToLower(k)
+ switch k {
+ case "content-md5":
+ md5 = v[0]
+ case "content-type":
+ ctype = v[0]
+ case "date":
+ if !xamzDate {
+ date = v[0]
+ }
+ default:
+ if strings.HasPrefix(k, "x-amz-") {
+ keys = append(keys, k)
+ xheaders[k] = strings.Join(v, ",")
+ if k == "x-amz-date" {
+ xamzDate = true
+ date = ""
+ }
+ }
+ }
+ }
+ if len(keys) > 0 {
+ sort.StringSlice(keys).Sort()
+ for i := range keys {
+ key := keys[i]
+ value := xheaders[key]
+ sarray = append(sarray, key+":"+value)
+ }
+ xamz = strings.Join(sarray, "\n") + "\n"
+ }
+
+ expires := false
+ if v, ok := params["Expires"]; ok {
+ // Query string request authentication alternative.
+ expires = true
+ date = v[0]
+ params["AWSAccessKeyId"] = []string{auth.AccessKey}
+ }
+
+ sarray = sarray[0:0]
+ for k, v := range params {
+ if s3ParamsToSign[k] {
+ for _, vi := range v {
+ if vi == "" {
+ sarray = append(sarray, k)
+ } else {
+ // "When signing you do not encode these values."
+ sarray = append(sarray, k+"="+vi)
+ }
+ }
+ }
+ }
+ if len(sarray) > 0 {
+ sort.StringSlice(sarray).Sort()
+ canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
+ }
+
+ payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
+ hash := hmac.New(sha1.New, []byte(auth.SecretKey))
+ hash.Write([]byte(payload))
+ signature := make([]byte, b64.EncodedLen(hash.Size()))
+ b64.Encode(signature, hash.Sum(nil))
+
+ if expires {
+ params["Signature"] = []string{string(signature)}
+ } else {
+ headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
+ }
+ if debug {
+ log.Printf("Signature payload: %q", payload)
+ log.Printf("Signature: %q", signature)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign_test.go b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign_test.go
new file mode 100644
index 00000000..0e35aef6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/crowdmob/goamz/s3/sign_test.go
@@ -0,0 +1,148 @@
+package s3_test
+
+import (
+ "github.com/crowdmob/goamz/aws"
+ "github.com/crowdmob/goamz/s3"
+ "gopkg.in/check.v1"
+)
+
+// S3 ReST authentication docs: http://goo.gl/G1LrK
+
+var testAuth = aws.Auth{AccessKey: "0PN5J17HBGZHT7JJ3X82", SecretKey: "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o"}
+
+func (s *S) TestSignExampleObjectGet(c *check.C) {
+ method := "GET"
+ path := "/johnsmith/photos/puppy.jpg"
+ headers := map[string][]string{
+ "Host": {"johnsmith.s3.amazonaws.com"},
+ "Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
+ }
+ s3.Sign(testAuth, method, path, nil, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleObjectPut(c *check.C) {
+ method := "PUT"
+ path := "/johnsmith/photos/puppy.jpg"
+ headers := map[string][]string{
+ "Host": {"johnsmith.s3.amazonaws.com"},
+ "Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
+ "Content-Type": {"image/jpeg"},
+ "Content-Length": {"94328"},
+ }
+ s3.Sign(testAuth, method, path, nil, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleList(c *check.C) {
+ method := "GET"
+ path := "/johnsmith/"
+ params := map[string][]string{
+ "prefix": {"photos"},
+ "max-keys": {"50"},
+ "marker": {"puppy"},
+ }
+ headers := map[string][]string{
+ "Host": {"johnsmith.s3.amazonaws.com"},
+ "Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
+ "User-Agent": {"Mozilla/5.0"},
+ }
+ s3.Sign(testAuth, method, path, params, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleFetch(c *check.C) {
+ method := "GET"
+ path := "/johnsmith/"
+ params := map[string][]string{
+ "acl": {""},
+ }
+ headers := map[string][]string{
+ "Host": {"johnsmith.s3.amazonaws.com"},
+ "Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
+ }
+ s3.Sign(testAuth, method, path, params, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleDelete(c *check.C) {
+ method := "DELETE"
+ path := "/johnsmith/photos/puppy.jpg"
+ params := map[string][]string{}
+ headers := map[string][]string{
+ "Host": {"s3.amazonaws.com"},
+ "Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
+ "User-Agent": {"dotnet"},
+ "x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
+ }
+ s3.Sign(testAuth, method, path, params, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleUpload(c *check.C) {
+ method := "PUT"
+ path := "/static.johnsmith.net/db-backup.dat.gz"
+ params := map[string][]string{}
+ headers := map[string][]string{
+ "Host": {"static.johnsmith.net:8080"},
+ "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
+ "User-Agent": {"curl/7.15.5"},
+ "x-amz-acl": {"public-read"},
+ "content-type": {"application/x-download"},
+ "Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
+ "X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
+ "X-Amz-Meta-FileChecksum": {"0x02661779"},
+ "X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
+ "Content-Disposition": {"attachment; filename=database.dat"},
+ "Content-Encoding": {"gzip"},
+ "Content-Length": {"5913339"},
+ }
+ s3.Sign(testAuth, method, path, params, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleListAllMyBuckets(c *check.C) {
+ method := "GET"
+ path := "/"
+ headers := map[string][]string{
+ "Host": {"s3.amazonaws.com"},
+ "Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
+ }
+ s3.Sign(testAuth, method, path, nil, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleUnicodeKeys(c *check.C) {
+ method := "GET"
+ path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
+ headers := map[string][]string{
+ "Host": {"s3.amazonaws.com"},
+ "Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
+ }
+ s3.Sign(testAuth, method, path, nil, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
+
+func (s *S) TestSignExampleCustomSSE(c *check.C) {
+ method := "GET"
+ path := "/secret/config"
+ params := map[string][]string{}
+ headers := map[string][]string{
+ "Host": {"secret.johnsmith.net:8080"},
+ "Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
+ "x-amz-server-side-encryption-customer-key": {"MWJhakVna1dQT1B0SDFMeGtVVnRQRTFGaU1ldFJrU0I="},
+ "x-amz-server-side-encryption-customer-key-MD5": {"glIqxpqQ4a9aoK/iLttKzQ=="},
+ "x-amz-server-side-encryption-customer-algorithm": {"AES256"},
+ }
+ s3.Sign(testAuth, method, path, params, headers)
+ expected := "AWS 0PN5J17HBGZHT7JJ3X82:Xq6PWmIo0aOWq+LDjCEiCGgbmHE="
+ c.Assert(headers["Authorization"], check.DeepEquals, []string{expected})
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINERS
new file mode 100644
index 00000000..9571a14a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/MAINTAINERS
@@ -0,0 +1,4 @@
+Derek McGowan (github: dmcgowan)
+Eric Windisch (github: ewindisch)
+Josh Hawn (github: jlhawn)
+Vincent Batts (github: vbatts)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go
new file mode 100644
index 00000000..06a42825
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/builder_context.go
@@ -0,0 +1,20 @@
+package tarsum
+
+// BuilderContext is an interface extending TarSum by adding the Remove
+// method. There was concern about adding this method to TarSum itself,
+// so instead it is added just to "BuilderContext", which is then only
+// used during .dockerignore file processing - see builder/evaluator.go
+type BuilderContext interface {
+ TarSum
+ Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+	// Iterate in reverse so that removing an element does not shift the
+	// entries we have yet to visit; there can be more than one entry
+	// with this name, and all of them must be removed.
+	for i := len(bc.sums) - 1; i >= 0; i-- {
+		if bc.sums[i].Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go
new file mode 100644
index 00000000..f9f46809
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,125 @@
+package tarsum
+
+import "sort"
+
+// This info will be accessed through an interface so the actual name and sum cannot be meddled with.
+type FileInfoSumInterface interface {
+ // File name
+ Name() string
+ // Checksum of this particular file and its headers
+ Sum() string
+ // Position of file in the tar
+ Pos() int64
+}
+
+type fileInfoSum struct {
+ name string
+ sum string
+ pos int64
+}
+
+func (fis fileInfoSum) Name() string {
+ return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+ return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+ return fis.pos
+}
+
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+ for i := range fis {
+ if fis[i].Name() == name {
+ return fis[i]
+ }
+ }
+ return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+ f := FileInfoSums{}
+ for i := range fis {
+ if fis[i].Name() == name {
+ f = append(f, fis[i])
+ }
+ }
+ return f
+}
+
+func contains(s []string, e string) bool {
+ for _, a := range s {
+ if a == e {
+ return true
+ }
+ }
+ return false
+}
+
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
+ for i := range fis {
+ f := fis[i]
+ if _, ok := seen[f.Name()]; ok {
+ dups = append(dups, f)
+ } else {
+ seen[f.Name()] = 0
+ }
+ }
+ return dups
+}
+
+func (fis FileInfoSums) Len() int { return len(fis) }
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+func (fis FileInfoSums) SortByPos() {
+ sort.Sort(byPos{fis})
+}
+
+func (fis FileInfoSums) SortByNames() {
+ sort.Sort(byName{fis})
+}
+
+func (fis FileInfoSums) SortBySums() {
+ dups := fis.GetDuplicatePaths()
+ if len(dups) > 0 {
+ sort.Sort(bySum{fis, dups})
+ } else {
+ sort.Sort(bySum{fis, nil})
+ }
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }
+
+func (bn byName) Less(i, j int) bool {
+ if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+ return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+ }
+ return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}
+
+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+ FileInfoSums
+ dups FileInfoSums
+}
+
+func (bs bySum) Less(i, j int) bool {
+ if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+ return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+ }
+ return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}
+
+// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order
+type byPos struct{ FileInfoSums }
+
+func (bp byPos) Less(i, j int) bool {
+ return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go
new file mode 100644
index 00000000..e1c6cc12
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go
@@ -0,0 +1,45 @@
+package tarsum
+
+import "testing"
+
+func newFileInfoSums() FileInfoSums {
+ return FileInfoSums{
+ fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2},
+ fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5},
+ fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0},
+ fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3},
+ fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4},
+ fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1},
+ }
+}
+
+func TestSortFileInfoSums(t *testing.T) {
+ dups := newFileInfoSums().GetAllFile("dup1")
+ if len(dups) != 2 {
+ t.Errorf("expected length 2, got %d", len(dups))
+ }
+ dups.SortByNames()
+ if dups[0].Pos() != 4 {
+ t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos())
+ }
+
+ fis := newFileInfoSums()
+ expected := "0abcdef1234567890"
+ fis.SortBySums()
+ got := fis[0].Sum()
+ if got != expected {
+ t.Errorf("Expected %q, got %q", expected, got)
+ }
+
+ fis = newFileInfoSums()
+ expected = "dup1"
+ fis.SortByNames()
+ gotFis := fis[0]
+ if gotFis.Name() != expected {
+ t.Errorf("Expected %q, got %q", expected, gotFis.Name())
+ }
+ // since a duplicate is first, ensure it is ordered first by position too
+ if gotFis.Pos() != 4 {
+ t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go
new file mode 100644
index 00000000..c9f1315c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go
@@ -0,0 +1,237 @@
+package tarsum
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/hex"
+ "hash"
+ "io"
+ "strings"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+const (
+ buf8K = 8 * 1024
+ buf16K = 16 * 1024
+ buf32K = 32 * 1024
+)
+
+// NewTarSum creates a new interface for calculating a fixed time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+ return NewTarSumHash(r, dc, v, DefaultTHash)
+}
+
+// NewTarSumHash creates a new TarSum, using the provided THash rather than the DefaultTHash.
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+ headerSelector, err := getTarHeaderSelector(v)
+ if err != nil {
+ return nil, err
+ }
+ ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+ err = ts.initTarSum()
+ return ts, err
+}
+
+// TarSum is the generic interface for calculating fixed time
+// checksums of a tar archive
+type TarSum interface {
+ io.Reader
+ GetSums() FileInfoSums
+ Sum([]byte) string
+ Version() Version
+ Hash() THash
+}
+
+// tarSum struct is the structure for a Version0 checksum calculation
+type tarSum struct {
+ io.Reader
+ tarR *tar.Reader
+ tarW *tar.Writer
+ writer writeCloseFlusher
+ bufTar *bytes.Buffer
+ bufWriter *bytes.Buffer
+ bufData []byte
+ h hash.Hash
+ tHash THash
+ sums FileInfoSums
+ fileCounter int64
+ currentFile string
+ finished bool
+ first bool
+	DisableCompression bool              // false by default. When false, the output is gzip compressed.
+	tarSumVersion      Version           // this field is not exported so it cannot be mutated during use
+ headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive
+}
+
+func (ts tarSum) Hash() THash {
+ return ts.tHash
+}
+
+func (ts tarSum) Version() Version {
+ return ts.tarSumVersion
+}
+
+// THash pairs a hash.Hash generator with its name.
+type THash interface {
+ Hash() hash.Hash
+ Name() string
+}
+
+// NewTHash is a convenience function for creating a THash.
+func NewTHash(name string, h func() hash.Hash) THash {
+ return simpleTHash{n: name, h: h}
+}
+
+// TarSum default is "sha256"
+var DefaultTHash = NewTHash("sha256", sha256.New)
+
+type simpleTHash struct {
+ n string
+ h func() hash.Hash
+}
+
+func (sth simpleTHash) Name() string { return sth.n }
+func (sth simpleTHash) Hash() hash.Hash { return sth.h() }
+
+func (ts *tarSum) encodeHeader(h *tar.Header) error {
+ for _, elem := range ts.headerSelector.selectHeaders(h) {
+ if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ts *tarSum) initTarSum() error {
+ ts.bufTar = bytes.NewBuffer([]byte{})
+ ts.bufWriter = bytes.NewBuffer([]byte{})
+ ts.tarR = tar.NewReader(ts.Reader)
+ ts.tarW = tar.NewWriter(ts.bufTar)
+ if !ts.DisableCompression {
+ ts.writer = gzip.NewWriter(ts.bufWriter)
+ } else {
+ ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
+ }
+ if ts.tHash == nil {
+ ts.tHash = DefaultTHash
+ }
+ ts.h = ts.tHash.Hash()
+ ts.h.Reset()
+ ts.first = true
+ ts.sums = FileInfoSums{}
+ return nil
+}
+
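+// Read implements io.Reader. As data is pulled through the underlying tar
+// reader, each entry's selected headers and file body are written to the
+// current file hash, and the archive bytes are re-emitted to the caller
+// (gzip compressed unless DisableCompression is set). A file's sum is
+// recorded when its entry has been fully read; once the archive is
+// exhausted, the remaining buffered output is drained.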
+func (ts *tarSum) Read(buf []byte) (int, error) {
+ if ts.finished {
+ return ts.bufWriter.Read(buf)
+ }
+ if len(ts.bufData) < len(buf) {
+ switch {
+ case len(buf) <= buf8K:
+ ts.bufData = make([]byte, buf8K)
+ case len(buf) <= buf16K:
+ ts.bufData = make([]byte, buf16K)
+ case len(buf) <= buf32K:
+ ts.bufData = make([]byte, buf32K)
+ default:
+ ts.bufData = make([]byte, len(buf))
+ }
+ }
+ buf2 := ts.bufData[:len(buf)]
+
+ n, err := ts.tarR.Read(buf2)
+ if err != nil {
+ if err == io.EOF {
+ if _, err := ts.h.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ if !ts.first {
+ ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
+ ts.fileCounter++
+ ts.h.Reset()
+ } else {
+ ts.first = false
+ }
+
+ currentHeader, err := ts.tarR.Next()
+ if err != nil {
+ if err == io.EOF {
+ if err := ts.tarW.Close(); err != nil {
+ return 0, err
+ }
+ if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ if err := ts.writer.Close(); err != nil {
+ return 0, err
+ }
+ ts.finished = true
+ return n, nil
+ }
+ return n, err
+ }
+ ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/")
+ if err := ts.encodeHeader(currentHeader); err != nil {
+ return 0, err
+ }
+ if err := ts.tarW.WriteHeader(currentHeader); err != nil {
+ return 0, err
+ }
+ if _, err := ts.tarW.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ ts.tarW.Flush()
+ if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ ts.writer.Flush()
+
+ return ts.bufWriter.Read(buf)
+ }
+ return n, err
+ }
+
+ // Filling the hash buffer
+ if _, err = ts.h.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+
+	// Filling the tar writer
+ if _, err = ts.tarW.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ ts.tarW.Flush()
+
+ // Filling the output writer
+ if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ ts.writer.Flush()
+
+ return ts.bufWriter.Read(buf)
+}
+
+func (ts *tarSum) Sum(extra []byte) string {
+ ts.sums.SortBySums()
+ h := ts.tHash.Hash()
+ if extra != nil {
+ h.Write(extra)
+ }
+ for _, fis := range ts.sums {
+ h.Write([]byte(fis.Sum()))
+ }
+ checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
+ return checksum
+}
+
+func (ts *tarSum) GetSums() FileInfoSums {
+ return ts.sums
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
new file mode 100644
index 00000000..7a6f8edc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
@@ -0,0 +1,225 @@
+page_title: TarSum checksum specification
+page_description: Documentation for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+
+## Introduction
+
+In Docker, the transportation of filesystems is done with tar(1)
+archives. There are a variety of tar serialization formats [2], and a key
+concern here is ensuring a repeatable checksum given a set of inputs from a
+generic tar archive. Types of transportation include distribution to and from a
+registry endpoint, saving and loading through commands or Docker daemon APIs,
+transferring the build context from client to Docker daemon, and committing the
+filesystem of a container to become an image.
+
+As tar archives are used for transit but not preserved in many situations, the
+focus of the algorithm is to ensure the integrity of the preserved filesystem
+while maintaining deterministic accountability. This means neither constraining
+the ordering or manipulation of the files during the creation or unpacking of
+the archive, nor including additional metadata state about the filesystem
+attributes.
+
+## Intended Audience
+
+This document outlines the methods used for consistent checksum calculation
+of filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity and assurance of the
+filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+ information.
+* Checksum list - each file of the filesystem archive has its checksum
+ calculated from the payload and attributes of the file. The final checksum is
+ calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to new requirements, changes in its
+  behavior are managed by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+ "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+ | | \ |
+ | | \ |
+ |_version_|_cipher__|__ |
+ | \ |
+ |_calculation_mechanics_|______________________expected_sum_______________________|
+```
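+
+A TarSum string can be split mechanically on those delimiters. A minimal
+sketch in Go (the helper `parseTarSum` is illustrative only and not part of
+any package API):
+
+```
+package main
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// parseTarSum splits a TarSum string into its version, cipher, and
+// hexadecimal digest using the '+' and ':' delimiters described above.
+func parseTarSum(s string) (version, cipher, digest string, err error) {
+	parts := strings.SplitN(s, ":", 2)
+	if len(parts) != 2 {
+		return "", "", "", errors.New("missing ':' delimiter")
+	}
+	mechanics := strings.SplitN(parts[0], "+", 2)
+	if len(mechanics) != 2 {
+		return "", "", "", errors.New("missing '+' delimiter")
+	}
+	return mechanics[0], mechanics[1], parts[1], nil
+}
+
+func main() {
+	v, c, d, err := parseTarSum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(v, c, d)
+}
+```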
+
+## Versioning
+
+Versioning was introduced [0] to accommodate needed differences in calculation
+and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+ checksum calculation
+* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`-prefixed
+  pax tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation
+mechanic is `sha256`. This refers to the SHA-256 hash algorithm as defined in
+FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher could include future-proofing the
+TarSum checksum format and using faster cipher hashes for tar filesystem
+checksums.
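+
+As a brief usage sketch of selecting an alternate cipher through this
+package's API (the file name `layer.tar` is a placeholder, and the import
+path is the copy vendored in this tree):
+
+```
+package main
+
+import (
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+
+	"github.com/docker/docker/pkg/tarsum"
+)
+
+func main() {
+	f, err := os.Open("layer.tar")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+
+	// Request a sha1-based sum rather than the default sha256.
+	ts, err := tarsum.NewTarSumHash(f, true, tarsum.Version1, tarsum.NewTHash("sha1", sha1.New))
+	if err != nil {
+		log.Fatal(err)
+	}
+	// The archive must be read through before Sum is meaningful.
+	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha1:<hex digest>"
+}
+```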
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation takes into consideration the lifecycle
+of the tar archive, in that the tar archive is not an immutable, permanent
+artifact. Otherwise, options like relying on a known hashing cipher checksum
+of the archive itself would be reliable enough. The tar archive of the
+filesystem is used as a transportation medium for Docker images, and the
+archive is discarded once its contents are extracted. Therefore, for
+consistent validation, items such as the order of files in the tar archive
+and time stamps are subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as a reference for special ordering.
+
+#### Headers
+
+The following headers are read, in this order (with the corresponding
+representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For Version1 and later, the extended attribute headers ("SCHILY.xattr."
+prefixed pax headers) are included after the above list. These xattr
+key/value pairs are first sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+ "{.key}{.value}"
+
+with no newline.
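+
+For illustration, here is a sketch of how the Version0 ordered headers feed
+the hash, mirroring `v0TarHeaderSelect` and `encodeHeader` in this package
+(headers only; the file body is then written to the same hash before the
+digest is taken, and the standalone function below is illustrative):
+
+```
+package main
+
+import (
+	"archive/tar"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// hashV0Headers writes the ordered Version0 headers of h to a fresh
+// sha256 hash in the "{.key}{.value}" format, with no newlines.
+func hashV0Headers(h *tar.Header) string {
+	ordered := [][2]string{
+		{"name", h.Name},
+		{"mode", strconv.Itoa(int(h.Mode))},
+		{"uid", strconv.Itoa(h.Uid)},
+		{"gid", strconv.Itoa(h.Gid)},
+		{"size", strconv.Itoa(int(h.Size))},
+		{"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
+		{"typeflag", string([]byte{h.Typeflag})},
+		{"linkname", h.Linkname},
+		{"uname", h.Uname},
+		{"gname", h.Gname},
+		{"devmajor", strconv.Itoa(int(h.Devmajor))},
+		{"devminor", strconv.Itoa(int(h.Devminor))},
+	}
+	hash := sha256.New()
+	for _, kv := range ordered {
+		hash.Write([]byte(kv[0] + kv[1]))
+	}
+	return hex.EncodeToString(hash.Sum(nil))
+}
+
+func main() {
+	hdr := &tar.Header{Name: "file.txt", Mode: 0644, Size: 4, ModTime: time.Unix(0, 0), Typeflag: tar.TypeReg}
+	fmt.Println(hashV0Headers(hdr))
+}
+```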
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
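+
+A sketch of this final aggregation, mirroring `Sum` in tarsum.go (`sha256`
+is hard-coded for brevity, and the duplicate-path ordering rule is ignored
+by sorting plain strings):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"sort"
+)
+
+// finalChecksum aggregates per-file hex digests into the final TarSum
+// string, assuming fileSums holds one hex digest per file in the archive.
+func finalChecksum(version string, extra []byte, fileSums []string) string {
+	sort.Strings(fileSums) // sorted by the hexadecimal digest string
+	h := sha256.New()
+	if extra != nil {
+		h.Write(extra) // any additional payload is written first
+	}
+	for _, sum := range fileSums {
+		h.Write([]byte(sum))
+	}
+	return version + "+sha256:" + hex.EncodeToString(h.Sum(nil))
+}
+
+func main() {
+	fmt.Println(finalChecksum("tarsum.v1", nil, []string{"deadbeef", "cafed00d"}))
+}
+```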
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending of files
+with same names as prior files in the archive. The latter file will clobber the
+prior file of the same path. Due to this, the algorithm now accounts for files
+with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgements
+
+Thanks to Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial
+work on the TarSum calculation.
+
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go
new file mode 100644
index 00000000..26f12cc8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go
@@ -0,0 +1,573 @@
+package tarsum
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+type testLayer struct {
+ filename string
+ options *sizedOptions
+ jsonfile string
+ gzip bool
+ tarsum string
+ version Version
+ hash THash
+}
+
+var testLayers = []testLayer{
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ version: Version0,
+ tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"},
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ version: VersionDev,
+ tarsum: "tarsum.dev+sha256:486b86e25c4db4551228154848bc4663b15dd95784b1588980f4ba1cb42e83e9"},
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ gzip: true,
+ tarsum: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"},
+ {
+ // Tests existing version of TarSum when xattrs are present
+ filename: "testdata/xattr/layer.tar",
+ jsonfile: "testdata/xattr/json",
+ version: Version0,
+ tarsum: "tarsum+sha256:e86f81a4d552f13039b1396ed03ca968ea9717581f9577ef1876ea6ff9b38c98"},
+ {
+ // Tests next version of TarSum when xattrs are present
+ filename: "testdata/xattr/layer.tar",
+ jsonfile: "testdata/xattr/json",
+ version: VersionDev,
+ tarsum: "tarsum.dev+sha256:6235cd3a2afb7501bac541772a3d61a3634e95bc90bb39a4676e2cb98d08390d"},
+ {
+ filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar",
+ jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json",
+ tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"},
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"},
+ {
+ // this tar has two files with the same path
+ filename: "testdata/collision/collision-0.tar",
+ tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"},
+ {
+		// this tar has the same two files (with the same path) but in reverse
+		// order, ensuring it has a different hash than the one above
+ filename: "testdata/collision/collision-1.tar",
+ tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
+ {
+		// this tar has the newer of the colliding files from collision-0.tar,
+		// ensuring it has a different hash
+ filename: "testdata/collision/collision-2.tar",
+ tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
+ {
+		// this tar has the newer of the colliding files from collision-1.tar,
+		// ensuring it has a different hash
+ filename: "testdata/collision/collision-3.tar",
+ tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
+ hash: md5THash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
+ hash: sha1Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
+ hash: sha224Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
+ hash: sha384Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
+ hash: sha512Hash,
+ },
+}
+
+type sizedOptions struct {
+ num int64
+ size int64
+ isRand bool
+ realFile bool
+}
+
+// make a tar:
+// * num is the number of files the tar should have
+// * size is the bytes per file
+// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
+// * realFile will write to a TempFile, instead of an in memory buffer
+func sizedTar(opts sizedOptions) io.Reader {
+ var (
+ fh io.ReadWriter
+ err error
+ )
+ if opts.realFile {
+ fh, err = ioutil.TempFile("", "tarsum")
+ if err != nil {
+ return nil
+ }
+ } else {
+ fh = bytes.NewBuffer([]byte{})
+ }
+ tarW := tar.NewWriter(fh)
+ defer tarW.Close()
+ for i := int64(0); i < opts.num; i++ {
+ err := tarW.WriteHeader(&tar.Header{
+ Name: fmt.Sprintf("/testdata%d", i),
+ Mode: 0755,
+ Uid: 0,
+ Gid: 0,
+ Size: opts.size,
+ })
+ if err != nil {
+ return nil
+ }
+ var rBuf []byte
+ if opts.isRand {
+ rBuf = make([]byte, 8)
+ _, err = rand.Read(rBuf)
+ if err != nil {
+ return nil
+ }
+ } else {
+ rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ }
+
+ for i := int64(0); i < opts.size/int64(8); i++ {
+ tarW.Write(rBuf)
+ }
+ }
+ return fh
+}
+
+func emptyTarSum(gzip bool) (TarSum, error) {
+ reader, writer := io.Pipe()
+ tarWriter := tar.NewWriter(writer)
+
+ // Immediately close tarWriter and write-end of the
+ // Pipe in a separate goroutine so we don't block.
+ go func() {
+ tarWriter.Close()
+ writer.Close()
+ }()
+
+ return NewTarSum(reader, !gzip, Version0)
+}
+
+// TestEmptyTar tests that tarsum does not fail to read an empty tar
+// and correctly returns the hex digest of an empty hash.
+func TestEmptyTar(t *testing.T) {
+ // Test without gzip.
+ ts, err := emptyTarSum(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ zeroBlock := make([]byte, 1024)
+ buf := new(bytes.Buffer)
+
+ n, err := io.Copy(buf, ts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) {
+ t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n)
+ }
+
+ expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil))
+ resultSum := ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+
+ // Test with gzip.
+ ts, err = emptyTarSum(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf.Reset()
+
+ n, err = io.Copy(buf, ts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bufgz := new(bytes.Buffer)
+ gz := gzip.NewWriter(bufgz)
+ n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock))
+ gz.Close()
+ gzBytes := bufgz.Bytes()
+
+ if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) {
+ t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n)
+ }
+
+ resultSum = ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+
+ // Test without ever actually writing anything.
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
+ t.Fatal(err)
+ }
+
+ resultSum = ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+}
+
+var (
+ md5THash = NewTHash("md5", md5.New)
+ sha1Hash = NewTHash("sha1", sha1.New)
+ sha224Hash = NewTHash("sha224", sha256.New224)
+ sha384Hash = NewTHash("sha384", sha512.New384)
+ sha512Hash = NewTHash("sha512", sha512.New)
+)
+
+func TestTarSums(t *testing.T) {
+ for _, layer := range testLayers {
+ var (
+ fh io.Reader
+ err error
+ )
+ if len(layer.filename) > 0 {
+ fh, err = os.Open(layer.filename)
+ if err != nil {
+ t.Errorf("failed to open %s: %s", layer.filename, err)
+ continue
+ }
+ } else if layer.options != nil {
+ fh = sizedTar(*layer.options)
+ } else {
+ // What else is there to test?
+ t.Errorf("what to do with %#v", layer)
+ continue
+ }
+ if file, ok := fh.(*os.File); ok {
+ defer file.Close()
+ }
+
+ var ts TarSum
+ if layer.hash == nil {
+ // double negatives!
+ ts, err = NewTarSum(fh, !layer.gzip, layer.version)
+ } else {
+ ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash)
+ }
+ if err != nil {
+ t.Errorf("%q :: %q", err, layer.filename)
+ continue
+ }
+
+ // Read variable number of bytes to test dynamic buffer
+ dBuf := make([]byte, 1)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read 1B from %s: %s", layer.filename, err)
+ continue
+ }
+ dBuf = make([]byte, 16*1024)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read 16KB from %s: %s", layer.filename, err)
+ continue
+ }
+
+ // Read and discard remaining bytes
+ _, err = io.Copy(ioutil.Discard, ts)
+ if err != nil {
+ t.Errorf("failed to copy from %s: %s", layer.filename, err)
+ continue
+ }
+ var gotSum string
+ if len(layer.jsonfile) > 0 {
+ jfh, err := os.Open(layer.jsonfile)
+ if err != nil {
+ t.Errorf("failed to open %s: %s", layer.jsonfile, err)
+ continue
+ }
+ buf, err := ioutil.ReadAll(jfh)
+ if err != nil {
+ t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
+ continue
+ }
+ gotSum = ts.Sum(buf)
+ } else {
+ gotSum = ts.Sum(nil)
+ }
+
+ if layer.tarsum != gotSum {
+ t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum)
+ }
+ }
+}
+
+func TestIteration(t *testing.T) {
+ headerTests := []struct {
+ expectedSum string // TODO(vbatts) it would be nice to get individual sums of each
+ version Version
+ hdr *tar.Header
+ data []byte
+ }{
+ {
+ "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd",
+ Version0,
+ &tar.Header{
+ Name: "file.txt",
+ Size: 0,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte(""),
+ },
+ {
+ "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465",
+ VersionDev,
+ &tar.Header{
+ Name: "file.txt",
+ Size: 0,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte(""),
+ },
+ {
+ "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef",
+ VersionDev,
+ &tar.Header{
+ Name: "another.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd",
+ VersionDev,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.key1": "value1",
+ "user.key2": "value2",
+ },
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760",
+ VersionDev,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.KEY1": "value1", // adding different case to ensure different sum
+ "user.key2": "value2",
+ },
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa",
+ Version0,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.NOT": "CALCULATED",
+ },
+ },
+ []byte("test"),
+ },
+ }
+ for _, htest := range headerTests {
+ s, err := renderSumForHeader(htest.version, htest.hdr, htest.data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if s != htest.expectedSum {
+ t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s)
+ }
+ }
+
+}
+
+func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) {
+ buf := bytes.NewBuffer(nil)
+ // first build our test tar
+ tw := tar.NewWriter(buf)
+ if err := tw.WriteHeader(h); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(data); err != nil {
+ return "", err
+ }
+ tw.Close()
+
+ ts, err := NewTarSum(buf, true, v)
+ if err != nil {
+ return "", err
+ }
+ tr := tar.NewReader(ts)
+ for {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ // Signals the end of the archive.
+ break
+ }
+ if err != nil {
+ return "", err
+ }
+ if _, err = io.Copy(ioutil.Discard, tr); err != nil {
+ return "", err
+ }
+ }
+ return ts.Sum(nil), nil
+}
+
+func Benchmark9kTar(b *testing.B) {
+ buf := bytes.NewBuffer([]byte{})
+ fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ n, err := io.Copy(buf, fh)
+ fh.Close()
+
+ reader := bytes.NewReader(buf.Bytes())
+
+ b.SetBytes(n)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reader.Seek(0, 0)
+ ts, err := NewTarSum(reader, true, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ }
+}
+
+func Benchmark9kTarGzip(b *testing.B) {
+ buf := bytes.NewBuffer([]byte{})
+ fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ n, err := io.Copy(buf, fh)
+ fh.Close()
+
+ reader := bytes.NewReader(buf.Bytes())
+
+ b.SetBytes(n)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reader.Seek(0, 0)
+ ts, err := NewTarSum(reader, false, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ }
+}
+
+// this is a single big file in the tar archive
+func Benchmark1mbSingleFileTar(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false)
+}
+
+// this is a single big file in the tar archive
+func Benchmark1mbSingleFileTarGzip(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true)
+}
+
+// this is 1024 1k files in the tar archive
+func Benchmark1kFilesTar(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false)
+}
+
+// this is 1024 1k files in the tar archive
+func Benchmark1kFilesTarGzip(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true)
+}
+
+func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) {
+ var fh *os.File
+ tarReader := sizedTar(opts)
+ if br, ok := tarReader.(*os.File); ok {
+ fh = br
+ }
+ defer os.Remove(fh.Name())
+ defer fh.Close()
+
+ b.SetBytes(opts.size * opts.num)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ts, err := NewTarSum(fh, !isGzip, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ fh.Seek(0, 0)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
new file mode 100644
index 00000000..0f0ba497
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
@@ -0,0 +1 @@
+{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
new file mode 100644
index 00000000..dfd5c204
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
new file mode 100644
index 00000000..12c18a07
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
@@ -0,0 +1 @@
+{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
new file mode 100644
index 00000000..880b3f2c
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar
new file mode 100644
index 00000000..1c636b3b
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar
new file mode 100644
index 00000000..b411be97
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar
new file mode 100644
index 00000000..7b5c04a9
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar
new file mode 100644
index 00000000..f8c64586
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json
new file mode 100644
index 00000000..328ea31f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json
@@ -0,0 +1 @@
+{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0}
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar
new file mode 100644
index 00000000..819351d4
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go
new file mode 100644
index 00000000..3a656612
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go
@@ -0,0 +1,132 @@
+package tarsum
+
+import (
+ "errors"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
+
+// Version indicates the version of the TarSum algorithm in use, identified
+// by the prefix of the checksum string,
+// e.g. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
+type Version int
+
+// Prefix of "tarsum"
+const (
+ Version0 Version = iota
+ Version1
+ // NOTE: VersionDev is either the latest or an unsettled next version of the TarSum calculation
+ VersionDev
+)
+
+// GetVersions returns a list of all known TarSum versions.
+func GetVersions() []Version {
+ v := []Version{}
+ for k := range tarSumVersions {
+ v = append(v, k)
+ }
+ return v
+}
+
+var tarSumVersions = map[Version]string{
+ Version0: "tarsum",
+ Version1: "tarsum.v1",
+ VersionDev: "tarsum.dev",
+}
+
+func (tsv Version) String() string {
+ return tarSumVersions[tsv]
+}
+
+// GetVersionFromTarsum returns the Version from the provided string
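+// For example (illustrative):
+//
+//	v, _ := GetVersionFromTarsum("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
+//	// v == Version0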
+func GetVersionFromTarsum(tarsum string) (Version, error) {
+ tsv := tarsum
+ if strings.Contains(tarsum, "+") {
+ tsv = strings.SplitN(tarsum, "+", 2)[0]
+ }
+ for v, s := range tarSumVersions {
+ if s == tsv {
+ return v, nil
+ }
+ }
+ return -1, ErrNotVersion
+}
+
+// Errors that may be returned by functions in this package
+var (
+ ErrNotVersion = errors.New("string does not include a TarSum Version")
+ ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
+)
+
+// tarHeaderSelector is the interface which different versions
+// of tarsum should use for selecting and ordering tar headers
+// for each item in the archive.
+type tarHeaderSelector interface {
+ selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
+}
+
+type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
+
+func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
+ return f(h)
+}
+
+func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+ return [][2]string{
+ {"name", h.Name},
+ {"mode", strconv.Itoa(int(h.Mode))},
+ {"uid", strconv.Itoa(h.Uid)},
+ {"gid", strconv.Itoa(h.Gid)},
+ {"size", strconv.Itoa(int(h.Size))},
+ {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
+ {"typeflag", string([]byte{h.Typeflag})},
+ {"linkname", h.Linkname},
+ {"uname", h.Uname},
+ {"gname", h.Gname},
+ {"devmajor", strconv.Itoa(int(h.Devmajor))},
+ {"devminor", strconv.Itoa(int(h.Devminor))},
+ }
+}
+
+func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+ // Get extended attributes, sorted by key below for determinism.
+ // The key slice must start at length 0: presizing the length and then
+ // appending would leave bogus empty keys at the front after sorting.
+ xAttrKeys := make([]string, 0, len(h.Xattrs))
+ for k := range h.Xattrs {
+ xAttrKeys = append(xAttrKeys, k)
+ }
+ sort.Strings(xAttrKeys)
+
+ // Make the slice with enough capacity to hold the 11 basic headers
+ // we want from the v0 selector plus however many xattrs we have.
+ orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
+
+ // Copy all headers from v0, excluding the 'mtime' header (index 5).
+ v0headers := v0TarHeaderSelect(h)
+ orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
+ orderedHeaders = append(orderedHeaders, v0headers[6:]...)
+
+ // Finally, append the sorted xattrs.
+ for _, k := range xAttrKeys {
+ orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
+ }
+
+ return
+}
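+
+// As an illustration (a sketch, not normative): for a header carrying one
+// xattr {"user.key": "value"}, the v1 selector yields name, mode, uid, gid,
+// size, typeflag, linkname, uname, gname, devmajor, devminor, then user.key.
+// Omitting mtime means archives that differ only in timestamps sum the same.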
+
+var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
+ Version0: v0TarHeaderSelect,
+ Version1: v1TarHeaderSelect,
+ VersionDev: v1TarHeaderSelect,
+}
+
+func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
+ headerSelector, ok := registeredHeaderSelectors[v]
+ if !ok {
+ return nil, ErrVersionNotImplemented
+ }
+
+ return headerSelector, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go
new file mode 100644
index 00000000..4ddb72ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning_test.go
@@ -0,0 +1,55 @@
+package tarsum
+
+import (
+ "testing"
+)
+
+func TestVersion(t *testing.T) {
+ expected := "tarsum"
+ var v Version
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+
+ expected = "tarsum.v1"
+ v = 1
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+
+ expected = "tarsum.dev"
+ v = 2
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+}
+
+func TestGetVersion(t *testing.T) {
+ testSet := []struct {
+ Str string
+ Expected Version
+ }{
+ {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0},
+ {"tarsum+sha256", Version0},
+ {"tarsum", Version0},
+ {"tarsum.dev", VersionDev},
+ {"tarsum.dev+sha256:deadbeef", VersionDev},
+ }
+
+ for _, ts := range testSet {
+ v, err := GetVersionFromTarsum(ts.Str)
+ if err != nil {
+ t.Fatalf("%q : %s", err, ts.Str)
+ }
+ if v != ts.Expected {
+ t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v)
+ }
+ }
+
+ // test one that does not exist, to ensure it errors
+ str := "weak+md5:abcdeabcde"
+ _, err := GetVersionFromTarsum(str)
+ if err != ErrNotVersion {
+ t.Fatalf("%q : %s", err, str)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go
new file mode 100644
index 00000000..9727ecde
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/writercloser.go
@@ -0,0 +1,22 @@
+package tarsum
+
+import (
+ "io"
+)
+
+type writeCloseFlusher interface {
+ io.WriteCloser
+ Flush() error
+}
+
+type nopCloseFlusher struct {
+ io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+ return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+ return nil
+}
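+
+// A usage sketch (an assumption from how tarsum composes its writers, not
+// shown in this file): when compression is disabled, the tar stream can be
+// fed to a bare hash wrapped in a nopCloseFlusher so it still satisfies
+// writeCloseFlusher:
+//
+//	var h hash.Hash = sha256.New()
+//	var w writeCloseFlusher = &nopCloseFlusher{h}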
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
new file mode 100644
index 00000000..e363aa79
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
@@ -0,0 +1,305 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+// http://www.gnu.org/software/tar/manual/html_node/Standard.html
+// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+const (
+ blockSize = 512
+
+ // Types
+ TypeReg = '0' // regular file
+ TypeRegA = '\x00' // regular file
+ TypeLink = '1' // hard link
+ TypeSymlink = '2' // symbolic link
+ TypeChar = '3' // character device node
+ TypeBlock = '4' // block device node
+ TypeDir = '5' // directory
+ TypeFifo = '6' // fifo node
+ TypeCont = '7' // reserved
+ TypeXHeader = 'x' // extended header
+ TypeXGlobalHeader = 'g' // global extended header
+ TypeGNULongName = 'L' // Next file has a long name
+ TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
+ TypeGNUSparse = 'S' // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+ Name string // name of header file entry
+ Mode int64 // permission and mode bits
+ Uid int // user id of owner
+ Gid int // group id of owner
+ Size int64 // length in bytes
+ ModTime time.Time // modified time
+ Typeflag byte // type of header entry
+ Linkname string // target name of link
+ Uname string // user name of owner
+ Gname string // group name of owner
+ Devmajor int64 // major number of character or block device
+ Devminor int64 // minor number of character or block device
+ AccessTime time.Time // access time
+ ChangeTime time.Time // status change time
+ Xattrs map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+ fileNameSize = 100 // Maximum number of bytes in a standard tar name.
+ fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+ h *Header
+}
+
+func (fi headerFileInfo) Size() int64 { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{} { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+ if fi.IsDir() {
+ return path.Base(path.Clean(fi.h.Name))
+ }
+ return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+ // Set file permission bits.
+ mode = os.FileMode(fi.h.Mode).Perm()
+
+ // Set setuid, setgid and sticky bits.
+ if fi.h.Mode&c_ISUID != 0 {
+ // setuid
+ mode |= os.ModeSetuid
+ }
+ if fi.h.Mode&c_ISGID != 0 {
+ // setgid
+ mode |= os.ModeSetgid
+ }
+ if fi.h.Mode&c_ISVTX != 0 {
+ // sticky
+ mode |= os.ModeSticky
+ }
+
+ // Set file mode bits.
+ // clear perm, setuid, setgid and sticky bits.
+ m := os.FileMode(fi.h.Mode) &^ 07777
+ if m == c_ISDIR {
+ // directory
+ mode |= os.ModeDir
+ }
+ if m == c_ISFIFO {
+ // named pipe (FIFO)
+ mode |= os.ModeNamedPipe
+ }
+ if m == c_ISLNK {
+ // symbolic link
+ mode |= os.ModeSymlink
+ }
+ if m == c_ISBLK {
+ // device file
+ mode |= os.ModeDevice
+ }
+ if m == c_ISCHR {
+ // Unix character device
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ }
+ if m == c_ISSOCK {
+ // Unix domain socket
+ mode |= os.ModeSocket
+ }
+
+ switch fi.h.Typeflag {
+ case TypeLink, TypeSymlink:
+ // hard link, symbolic link
+ mode |= os.ModeSymlink
+ case TypeChar:
+ // character device node
+ mode |= os.ModeDevice
+ mode |= os.ModeCharDevice
+ case TypeBlock:
+ // block device node
+ mode |= os.ModeDevice
+ case TypeDir:
+ // directory
+ mode |= os.ModeDir
+ case TypeFifo:
+ // fifo node
+ mode |= os.ModeNamedPipe
+ }
+
+ return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+ c_ISDIR = 040000 // Directory
+ c_ISFIFO = 010000 // FIFO
+ c_ISREG = 0100000 // Regular file
+ c_ISLNK = 0120000 // Symbolic link
+ c_ISBLK = 060000 // Block special file
+ c_ISCHR = 020000 // Character special file
+ c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+ paxAtime = "atime"
+ paxCharset = "charset"
+ paxComment = "comment"
+ paxCtime = "ctime" // please note that ctime is not a valid pax header.
+ paxGid = "gid"
+ paxGname = "gname"
+ paxLinkpath = "linkpath"
+ paxMtime = "mtime"
+ paxPath = "path"
+ paxSize = "size"
+ paxUid = "uid"
+ paxUname = "uname"
+ paxXattr = "SCHILY.xattr."
+ paxNone = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
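+// A minimal usage sketch (illustrative; the path is hypothetical):
+//
+//	fi, _ := os.Stat("small.txt")
+//	hdr, _ := FileInfoHeader(fi, "")
+//	hdr.Name = "dir/small.txt" // restore the full path when needed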
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+ if fi == nil {
+ return nil, errors.New("tar: FileInfo is nil")
+ }
+ fm := fi.Mode()
+ h := &Header{
+ Name: fi.Name(),
+ ModTime: fi.ModTime(),
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
+ }
+ switch {
+ case fm.IsRegular():
+ h.Mode |= c_ISREG
+ h.Typeflag = TypeReg
+ h.Size = fi.Size()
+ case fi.IsDir():
+ h.Typeflag = TypeDir
+ h.Mode |= c_ISDIR
+ h.Name += "/"
+ case fm&os.ModeSymlink != 0:
+ h.Typeflag = TypeSymlink
+ h.Mode |= c_ISLNK
+ h.Linkname = link
+ case fm&os.ModeDevice != 0:
+ if fm&os.ModeCharDevice != 0 {
+ h.Mode |= c_ISCHR
+ h.Typeflag = TypeChar
+ } else {
+ h.Mode |= c_ISBLK
+ h.Typeflag = TypeBlock
+ }
+ case fm&os.ModeNamedPipe != 0:
+ h.Typeflag = TypeFifo
+ h.Mode |= c_ISFIFO
+ case fm&os.ModeSocket != 0:
+ h.Mode |= c_ISSOCK
+ default:
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+ }
+ if fm&os.ModeSetuid != 0 {
+ h.Mode |= c_ISUID
+ }
+ if fm&os.ModeSetgid != 0 {
+ h.Mode |= c_ISGID
+ }
+ if fm&os.ModeSticky != 0 {
+ h.Mode |= c_ISVTX
+ }
+ if sysStat != nil {
+ return h, sysStat(fi, h)
+ }
+ return h, nil
+}
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+ for i := 0; i < len(header); i++ {
+ if i == 148 {
+ // The chksum field (header[148:156]) is special: it should be treated as space bytes.
+ unsigned += ' ' * 8
+ signed += ' ' * 8
+ i += 7
+ continue
+ }
+ unsigned += int64(header[i])
+ signed += int64(int8(header[i]))
+ }
+ return
+}
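+
+// For instance, a header block of all zero bytes checksums to 256, both
+// unsigned and signed: every byte contributes 0 except the eight chksum
+// bytes, which are counted as spaces (8 * 32).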
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+ s := *sp
+ b, *sp = s[0:n], s[n:]
+ return
+}
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c >= 0x80 {
+ return false
+ }
+ }
+ return true
+}
+
+func toASCII(s string) string {
+ if isASCII(s) {
+ return s
+ }
+ var buf bytes.Buffer
+ for _, c := range s {
+ if c < 0x80 {
+ buf.WriteByte(byte(c))
+ }
+ }
+ return buf.String()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
new file mode 100644
index 00000000..351eaa0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
@@ -0,0 +1,79 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar_test
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+func Example() {
+ // Create a buffer to write our archive to.
+ buf := new(bytes.Buffer)
+
+ // Create a new tar archive.
+ tw := tar.NewWriter(buf)
+
+ // Add some files to the archive.
+ var files = []struct {
+ Name, Body string
+ }{
+ {"readme.txt", "This archive contains some text files."},
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
+ {"todo.txt", "Get animal handling licence."},
+ }
+ for _, file := range files {
+ hdr := &tar.Header{
+ Name: file.Name,
+ Size: int64(len(file.Body)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ log.Fatalln(err)
+ }
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
+ log.Fatalln(err)
+ }
+ }
+ // Make sure to check the error on Close.
+ if err := tw.Close(); err != nil {
+ log.Fatalln(err)
+ }
+
+ // Open the tar archive for reading.
+ r := bytes.NewReader(buf.Bytes())
+ tr := tar.NewReader(r)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("Contents of %s:\n", hdr.Name)
+ if _, err := io.Copy(os.Stdout, tr); err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Println()
+ }
+
+ // Output:
+ // Contents of readme.txt:
+ // This archive contains some text files.
+ // Contents of gopher.txt:
+ // Gopher names:
+ // George
+ // Geoffrey
+ // Gonzo
+ // Contents of todo.txt:
+ // Get animal handling licence.
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
new file mode 100644
index 00000000..a27559d0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
@@ -0,0 +1,820 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - pax extensions
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+ r io.Reader
+ err error
+ pad int64 // amount of padding (ignored) after current file entry
+ curr numBytesReader // reader for current file entry
+ hdrBuff [blockSize]byte // buffer to use in readHeader
+}
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+ io.Reader
+ numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+ r io.Reader // underlying reader
+ nb int64 // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
+type sparseFileReader struct {
+ rfr *regFileReader // reads the sparse-encoded file data
+ sp []sparseEntry // the sparse map for the file
+ pos int64 // keeps track of file position
+ tot int64 // total size of the file
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+ paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+ paxGNUSparseOffset = "GNU.sparse.offset"
+ paxGNUSparseNumBytes = "GNU.sparse.numbytes"
+ paxGNUSparseMap = "GNU.sparse.map"
+ paxGNUSparseName = "GNU.sparse.name"
+ paxGNUSparseMajor = "GNU.sparse.major"
+ paxGNUSparseMinor = "GNU.sparse.minor"
+ paxGNUSparseSize = "GNU.sparse.size"
+ paxGNUSparseRealSize = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+ oldGNUSparseMainHeaderOffset = 386
+ oldGNUSparseMainHeaderIsExtendedOffset = 482
+ oldGNUSparseMainHeaderNumEntries = 4
+ oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+ oldGNUSparseExtendedHeaderNumEntries = 21
+ oldGNUSparseOffsetSize = 12
+ oldGNUSparseNumBytesSize = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+func (tr *Reader) Next() (*Header, error) {
+ var hdr *Header
+ if tr.err == nil {
+ tr.skipUnread()
+ }
+ if tr.err != nil {
+ return hdr, tr.err
+ }
+ hdr = tr.readHeader()
+ if hdr == nil {
+ return hdr, tr.err
+ }
+ // Check for PAX/GNU header.
+ switch hdr.Typeflag {
+ case TypeXHeader:
+ // PAX extended header
+ headers, err := parsePAX(tr)
+ if err != nil {
+ return nil, err
+ }
+ // We actually read the whole file,
+ // but this skips alignment padding
+ tr.skipUnread()
+ hdr = tr.readHeader()
+ if hdr == nil {
+ return nil, tr.err
+ }
+ mergePAX(hdr, headers)
+
+ // Check for a PAX format sparse file
+ sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
+ if err != nil {
+ tr.err = err
+ return nil, err
+ }
+ if sp != nil {
+ // Current file is a PAX format GNU sparse file.
+ // Set the current file reader to a sparse file reader.
+ tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
+ }
+ return hdr, nil
+ case TypeGNULongName:
+ // We have a GNU long name header. Its contents are the real file name.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ if err != nil {
+ return nil, err
+ }
+ hdr.Name = cString(realname)
+ return hdr, nil
+ case TypeGNULongLink:
+ // We have a GNU long link header.
+ realname, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ hdr, err := tr.Next()
+ if err != nil {
+ return nil, err
+ }
+ hdr.Linkname = cString(realname)
+ return hdr, nil
+ }
+ return hdr, tr.err
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+ var sparseFormat string
+
+ // Check for sparse format indicators
+ major, majorOk := headers[paxGNUSparseMajor]
+ minor, minorOk := headers[paxGNUSparseMinor]
+ sparseName, sparseNameOk := headers[paxGNUSparseName]
+ _, sparseMapOk := headers[paxGNUSparseMap]
+ sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+ sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+ // Identify which, if any, sparse format applies from which PAX headers are set
+ if majorOk && minorOk {
+ sparseFormat = major + "." + minor
+ } else if sparseNameOk && sparseMapOk {
+ sparseFormat = "0.1"
+ } else if sparseSizeOk {
+ sparseFormat = "0.0"
+ } else {
+ // Not a PAX format GNU sparse file.
+ return nil, nil
+ }
+
+ // Check for unknown sparse format
+ if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+ return nil, nil
+ }
+
+ // Update hdr from GNU sparse PAX headers
+ if sparseNameOk {
+ hdr.Name = sparseName
+ }
+ if sparseSizeOk {
+ realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ } else if sparseRealSizeOk {
+ realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ hdr.Size = realSize
+ }
+
+ // Set up the sparse map, according to the particular sparse format in use
+ var sp []sparseEntry
+ var err error
+ switch sparseFormat {
+ case "0.0", "0.1":
+ sp, err = readGNUSparseMap0x1(headers)
+ case "1.0":
+ sp, err = readGNUSparseMap1x0(tr.curr)
+ }
+ return sp, err
+}
+
+// mergePAX merges well-known PAX records into hdr according to the PAX
+// standard. A record overwrites the corresponding Header field, typically
+// supplying higher precision or a longer value; this is especially useful
+// for the name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+ for k, v := range headers {
+ switch k {
+ case paxPath:
+ hdr.Name = v
+ case paxLinkpath:
+ hdr.Linkname = v
+ case paxGname:
+ hdr.Gname = v
+ case paxUname:
+ hdr.Uname = v
+ case paxUid:
+ uid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = int(uid)
+ case paxGid:
+ gid, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = int(gid)
+ case paxAtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.AccessTime = t
+ case paxMtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ModTime = t
+ case paxCtime:
+ t, err := parsePAXTime(v)
+ if err != nil {
+ return err
+ }
+ hdr.ChangeTime = t
+ case paxSize:
+ size, err := strconv.ParseInt(v, 10, 0)
+ if err != nil {
+ return err
+ }
+ hdr.Size = size
+ default:
+ if strings.HasPrefix(k, paxXattr) {
+ if hdr.Xattrs == nil {
+ hdr.Xattrs = make(map[string]string)
+ }
+ hdr.Xattrs[k[len(paxXattr):]] = v
+ }
+ }
+ }
+ return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
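+// For example, "1350244992.023960108" parses to
+// time.Unix(1350244992, 23960108).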
+func parsePAXTime(t string) (time.Time, error) {
+ buf := []byte(t)
+ pos := bytes.IndexByte(buf, '.')
+ var seconds, nanoseconds int64
+ var err error
+ if pos == -1 {
+ seconds, err = strconv.ParseInt(t, 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ } else {
+ seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nano_buf := string(buf[pos+1:])
+ // Pad as needed before converting to a decimal.
+ // For example .030 -> .030000000 -> 30000000 nanoseconds
+ if len(nano_buf) < maxNanoSecondIntSize {
+ // Right pad
+ nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
+ } else if len(nano_buf) > maxNanoSecondIntSize {
+ // Right truncate
+ nano_buf = nano_buf[:maxNanoSecondIntSize]
+ }
+ nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
+ if err != nil {
+ return time.Time{}, err
+ }
+ }
+ ts := time.Unix(seconds, nanoseconds)
+ return ts, nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
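+// Each record has the form "%d %s=%s\n" (length, keyword, value); for
+// example, the record "30 mtime=1350244992.023960108\n" sets
+// headers["mtime"].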
+func parsePAX(r io.Reader) (map[string]string, error) {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ // For GNU PAX sparse format 0.0 support.
+ // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
+ var sparseMap bytes.Buffer
+
+ headers := make(map[string]string)
+ // Each record is constructed as
+ // "%d %s=%s\n", length, keyword, value
+ for len(buf) > 0 {
+ // The size field ends at the first space.
+ sp := bytes.IndexByte(buf, ' ')
+ if sp == -1 {
+ return nil, ErrHeader
+ }
+ // Parse the first token as a decimal integer.
+ n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ // Extract the record: skip one byte at the start to eat the ' ' after the
+ // length field, and drop one at the end to skip the trailing newline.
+ var record []byte
+ record, buf = buf[sp+1:n-1], buf[n:]
+ // The first equals is guaranteed to mark the end of the key.
+ // Everything else is value.
+ eq := bytes.IndexByte(record, '=')
+ if eq == -1 {
+ return nil, ErrHeader
+ }
+ key, value := record[:eq], record[eq+1:]
+
+ keyStr := string(key)
+ if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
+ // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
+ sparseMap.Write(value)
+ sparseMap.Write([]byte{','})
+ } else {
+ // Normal key. Set the value in the headers map.
+ headers[keyStr] = string(value)
+ }
+ }
+ if sparseMap.Len() != 0 {
+ // Add sparse info to headers, chopping off the extra comma
+ sparseMap.Truncate(sparseMap.Len() - 1)
+ headers[paxGNUSparseMap] = sparseMap.String()
+ }
+ return headers, nil
+}
+
+// cString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func cString(b []byte) string {
+ n := 0
+ for n < len(b) && b[n] != 0 {
+ n++
+ }
+ return string(b[0:n])
+}
+
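+// octal parses a tar numeric field: base-256 binary when the high bit of the
+// first byte is set, otherwise octal digits with NUL/space padding trimmed.
+// For example, the 8-byte mode field "0000640\x00" parses to 0640 (416).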
+func (tr *Reader) octal(b []byte) int64 {
+ // Check for binary format first.
+ if len(b) > 0 && b[0]&0x80 != 0 {
+ var x int64
+ for i, c := range b {
+ if i == 0 {
+ c &= 0x7f // clear the binary-format marker bit in the first byte
+ }
+ x = x<<8 | int64(c)
+ }
+ return x
+ }
+
+ // Because unused fields are filled with NULs, we need
+ // to skip leading NULs. Fields may also be padded with
+ // spaces or NULs.
+ // So we remove leading and trailing NULs and spaces to
+ // be sure.
+ b = bytes.Trim(b, " \x00")
+
+ if len(b) == 0 {
+ return 0
+ }
+ x, err := strconv.ParseUint(cString(b), 8, 64)
+ if err != nil {
+ tr.err = err
+ }
+ return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
+func (tr *Reader) skipUnread() {
+ nr := tr.numBytes() + tr.pad // number of bytes to skip
+ tr.curr, tr.pad = nil, 0
+ if sr, ok := tr.r.(io.Seeker); ok {
+ if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
+ return
+ }
+ }
+ _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
+}
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+ if tr.err != nil {
+ return false
+ }
+
+ given := tr.octal(header[148:156])
+ unsigned, signed := checksum(header)
+ return given == unsigned || given == signed
+}
+
+func (tr *Reader) readHeader() *Header {
+ header := tr.hdrBuff[:]
+ copy(header, zeroBlock)
+
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+
+ // Two blocks of zero bytes marks the end of the archive.
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
+ return nil
+ }
+ if bytes.Equal(header, zeroBlock[0:blockSize]) {
+ tr.err = io.EOF
+ } else {
+ tr.err = ErrHeader // zero block and then non-zero block
+ }
+ return nil
+ }
+
+ if !tr.verifyChecksum(header) {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Unpack
+ hdr := new(Header)
+ s := slicer(header)
+
+ hdr.Name = cString(s.next(100))
+ hdr.Mode = tr.octal(s.next(8))
+ hdr.Uid = int(tr.octal(s.next(8)))
+ hdr.Gid = int(tr.octal(s.next(8)))
+ hdr.Size = tr.octal(s.next(12))
+ hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
+ s.next(8) // chksum
+ hdr.Typeflag = s.next(1)[0]
+ hdr.Linkname = cString(s.next(100))
+
+ // The remainder of the header depends on the value of magic.
+ // The original (v7) version of tar had no explicit magic field,
+ // so its magic bytes, like the rest of the block, are NULs.
+ magic := string(s.next(8)) // contains version field as well.
+ var format string
+ switch {
+ case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
+ if string(header[508:512]) == "tar\x00" {
+ format = "star"
+ } else {
+ format = "posix"
+ }
+ case magic == "ustar \x00": // old GNU tar
+ format = "gnu"
+ }
+
+ switch format {
+ case "posix", "gnu", "star":
+ hdr.Uname = cString(s.next(32))
+ hdr.Gname = cString(s.next(32))
+ devmajor := s.next(8)
+ devminor := s.next(8)
+ if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+ hdr.Devmajor = tr.octal(devmajor)
+ hdr.Devminor = tr.octal(devminor)
+ }
+ var prefix string
+ switch format {
+ case "posix", "gnu":
+ prefix = cString(s.next(155))
+ case "star":
+ prefix = cString(s.next(131))
+ hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
+ hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
+ }
+ if len(prefix) > 0 {
+ hdr.Name = prefix + "/" + hdr.Name
+ }
+ }
+
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+
+ // Maximum value of hdr.Size is 64 GB (12 octal digits),
+ // so there's no risk of int64 overflowing.
+ nb := int64(hdr.Size)
+ tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
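+ // e.g. nb == 5 gives tr.pad == 507, so data plus padding fills one 512-byte block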
+
+ // Set the current file reader.
+ tr.curr = &regFileReader{r: tr.r, nb: nb}
+
+ // Check for old GNU sparse format entry.
+ if hdr.Typeflag == TypeGNUSparse {
+ // Get the real size of the file.
+ hdr.Size = tr.octal(header[483:495])
+
+ // Read the sparse map.
+ sp := tr.readOldGNUSparseMap(header)
+ if tr.err != nil {
+ return nil
+ }
+ // Current file is a GNU sparse file. Update the current file reader.
+ tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
+ }
+
+ return hdr
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+// A sparse entry indicates the offset and size in a sparse file of a
+// block of data.
+type sparseEntry struct {
+ offset int64
+ numBytes int64
+}
+
+// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
+// then one or more extension headers are used to store the rest of the sparse map.
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
+ isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
+ spCap := oldGNUSparseMainHeaderNumEntries
+ if isExtended {
+ spCap += oldGNUSparseExtendedHeaderNumEntries
+ }
+ sp := make([]sparseEntry, 0, spCap)
+ s := slicer(header[oldGNUSparseMainHeaderOffset:])
+
+ // Read the four entries from the main tar header
+ for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
+ offset := tr.octal(s.next(oldGNUSparseOffsetSize))
+ numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ for isExtended {
+ // There are more entries. Read an extension header and parse its entries.
+ sparseHeader := make([]byte, blockSize)
+ if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
+ return nil
+ }
+ isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
+ s = slicer(sparseHeader)
+ for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
+ offset := tr.octal(s.next(oldGNUSparseOffsetSize))
+ numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
+ if tr.err != nil {
+ tr.err = ErrHeader
+ return nil
+ }
+ if offset == 0 && numBytes == 0 {
+ break
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+ }
+ return sp
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
+// The sparse map is stored just before the file data and padded out to the nearest block boundary.
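+// The map is a newline-separated list of decimal numbers: the entry count,
+// then offset/numBytes pairs. For example, "2\n0\n5\n10\n5\n" (padded to a
+// full block) describes two 5-byte fragments at offsets 0 and 10.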
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
+ buf := make([]byte, 2*blockSize)
+ sparseHeader := buf[:blockSize]
+
+ // readDecimal is a helper function to read a decimal integer from the sparse map
+ // while making sure to read from the file in blocks of size blockSize
+ readDecimal := func() (int64, error) {
+ // Look for newline
+ nl := bytes.IndexByte(sparseHeader, '\n')
+ if nl == -1 {
+ if len(sparseHeader) >= blockSize {
+ // This is an error
+ return 0, ErrHeader
+ }
+ oldLen := len(sparseHeader)
+ newLen := oldLen + blockSize
+ if cap(sparseHeader) < newLen {
+ // There's more header, but we need to make room for the next block
+ copy(buf, sparseHeader)
+ sparseHeader = buf[:newLen]
+ } else {
+ // There's more header, and we can just reslice
+ sparseHeader = sparseHeader[:newLen]
+ }
+
+ // Now that sparseHeader is large enough, read next block
+ if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
+ return 0, err
+ }
+
+ // Look for a newline in the new data
+ nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
+ if nl == -1 {
+ // This is an error
+ return 0, ErrHeader
+ }
+ nl += oldLen // We want the position from the beginning
+ }
+ // Now that we've found a newline, read a number
+ n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
+ if err != nil {
+ return 0, ErrHeader
+ }
+
+ // Update sparseHeader to consume this number
+ sparseHeader = sparseHeader[nl+1:]
+ return n, nil
+ }
+
+ // Read the first block
+ if _, err := io.ReadFull(r, sparseHeader); err != nil {
+ return nil, err
+ }
+
+ // The first line contains the number of entries
+ numEntries, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ // Read all the entries
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ // Read the offset
+ offset, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+ // Read numBytes
+ numBytes, err := readDecimal()
+ if err != nil {
+ return nil, err
+ }
+
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ return sp, nil
+}
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
+// The sparse map is stored in the PAX headers.
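+// For example, headers {"GNU.sparse.numblocks": "4",
+// "GNU.sparse.map": "0,5,10,5,20,5,30,5"} decode to four 5-byte entries at
+// offsets 0, 10, 20 and 30.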
+func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
+ // Get number of entries
+ numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
+ if !ok {
+ return nil, ErrHeader
+ }
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+
+ sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
+
+ // There should be two numbers in sparseMap for each entry
+ if int64(len(sparseMap)) != 2*numEntries {
+ return nil, ErrHeader
+ }
+
+ // Loop through the entries in the sparse map
+ sp := make([]sparseEntry, 0, numEntries)
+ for i := int64(0); i < numEntries; i++ {
+ offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
+ if err != nil {
+ return nil, ErrHeader
+ }
+ sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+ }
+
+ return sp, nil
+}
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+ if tr.curr == nil {
+ // No current file, so no bytes
+ return 0
+ }
+ return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+ if tr.curr == nil {
+ return 0, io.EOF
+ }
+ n, err = tr.curr.Read(b)
+ if err != nil && err != io.EOF {
+ tr.err = err
+ }
+ return
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+ if rfr.nb == 0 {
+ // file consumed
+ return 0, io.EOF
+ }
+ if int64(len(b)) > rfr.nb {
+ b = b[0:rfr.nb]
+ }
+ n, err = rfr.r.Read(b)
+ rfr.nb -= int64(n)
+
+ if err == io.EOF && rfr.nb > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// numBytes returns the number of bytes left to read in the file's data in the tar archive.
+func (rfr *regFileReader) numBytes() int64 {
+ return rfr.nb
+}
+
+// readHole reads a sparse file hole ending at offset toOffset
+func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
+ n64 := toOffset - sfr.pos
+ if n64 > int64(len(b)) {
+ n64 = int64(len(b))
+ }
+ n := int(n64)
+ for i := 0; i < n; i++ {
+ b[i] = 0
+ }
+ sfr.pos += n64
+ return n
+}
+
+// Read reads the sparse file data in expanded form.
+func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
+ if len(sfr.sp) == 0 {
+ // No more data fragments to read from.
+ if sfr.pos < sfr.tot {
+ // We're in the last hole
+ n = sfr.readHole(b, sfr.tot)
+ return
+ }
+ // Otherwise, we're at the end of the file
+ return 0, io.EOF
+ }
+ if sfr.pos < sfr.sp[0].offset {
+ // We're in a hole
+ n = sfr.readHole(b, sfr.sp[0].offset)
+ return
+ }
+
+ // We're not in a hole, so we'll read from the next data fragment
+ posInFragment := sfr.pos - sfr.sp[0].offset
+ bytesLeft := sfr.sp[0].numBytes - posInFragment
+ if int64(len(b)) > bytesLeft {
+ b = b[0:bytesLeft]
+ }
+
+ n, err = sfr.rfr.Read(b)
+ sfr.pos += int64(n)
+
+ if int64(n) == bytesLeft {
+ // We're done with this fragment
+ sfr.sp = sfr.sp[1:]
+ }
+
+ if err == io.EOF && sfr.pos < sfr.tot {
+ // We reached the end of the last fragment's data, but there's a final hole
+ err = nil
+ }
+ return
+}
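+
+// As an illustration (mirroring this package's reader tests): sparse data
+// "abcde" with map entries {0,2} and {5,3} and a real size of 8 reads back
+// in expanded form as "ab\x00\x00\x00cde".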
+
+// numBytes returns the number of bytes left to read in the sparse file's
+// sparse-encoded data in the tar archive.
+func (sfr *sparseFileReader) numBytes() int64 {
+ return sfr.rfr.nb
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
new file mode 100644
index 00000000..9601ffe4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "crypto/md5"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type untarTest struct {
+ file string
+ headers []*Header
+ cksums []string
+}
+
+var gnuTarTest = &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244428340, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244436044, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ cksums: []string{
+ "e38b27eaccb4391bdec553a7f3ae6b2f",
+ "c65bd2e50a56a2138bf1716f2fd56fe9",
+ },
+}
+
+var sparseTarTest = &untarTest{
+ file: "testdata/sparse-formats.tar",
+ headers: []*Header{
+ {
+ Name: "sparse-gnu",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392395740, 0),
+ Typeflag: 0x53,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-0.0",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392342187, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-0.1",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392340456, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "sparse-posix-1.0",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 200,
+ ModTime: time.Unix(1392337404, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ {
+ Name: "end",
+ Mode: 420,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 4,
+ ModTime: time.Unix(1392398319, 0),
+ Typeflag: 0x30,
+ Linkname: "",
+ Uname: "david",
+ Gname: "david",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ },
+ cksums: []string{
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "6f53234398c2449fe67c1812d993012f",
+ "b0061974914468de549a2af8ced10316",
+ },
+}
+
+var untarTests = []*untarTest{
+ gnuTarTest,
+ sparseTarTest,
+ {
+ file: "testdata/star.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244592783, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ AccessTime: time.Unix(1244592783, 0),
+ ChangeTime: time.Unix(1244592783, 0),
+ },
+ },
+ },
+ {
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1244593104, 0),
+ Typeflag: '\x00',
+ },
+ },
+ },
+ {
+ file: "testdata/pax.tar",
+ headers: []*Header{
+ {
+ Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ Mode: 0664,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 7,
+ ModTime: time.Unix(1350244992, 23960108),
+ ChangeTime: time.Unix(1350244992, 23960108),
+ AccessTime: time.Unix(1350244992, 23960108),
+ Typeflag: TypeReg,
+ },
+ {
+ Name: "a/b",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "shane",
+ Gname: "shane",
+ Size: 0,
+ ModTime: time.Unix(1350266320, 910238425),
+ ChangeTime: time.Unix(1350266320, 910238425),
+ AccessTime: time.Unix(1350266320, 910238425),
+ Typeflag: TypeSymlink,
+ Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
+ },
+ },
+ },
+ {
+ file: "testdata/nil-uid.tar", // golang.org/issue/5290
+ headers: []*Header{
+ {
+ Name: "P1050238.JPG.log",
+ Mode: 0664,
+ Uid: 0,
+ Gid: 0,
+ Size: 14,
+ ModTime: time.Unix(1365454838, 0),
+ Typeflag: TypeReg,
+ Linkname: "",
+ Uname: "eyefi",
+ Gname: "eyefi",
+ Devmajor: 0,
+ Devminor: 0,
+ },
+ },
+ },
+ {
+ file: "testdata/xattrs.tar",
+ headers: []*Header{
+ {
+ Name: "small.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 5,
+ ModTime: time.Unix(1386065770, 448252320),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1389782956, 794414986),
+ Xattrs: map[string]string{
+ "user.key": "value",
+ "user.key2": "value2",
+ // Interestingly, selinux encodes the terminating null inside the xattr
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ {
+ Name: "small2.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 10,
+ Size: 11,
+ ModTime: time.Unix(1386065770, 449252304),
+ Typeflag: '0',
+ Uname: "alex",
+ Gname: "wheel",
+ AccessTime: time.Unix(1389782991, 419875220),
+ ChangeTime: time.Unix(1386065770, 449252304),
+ Xattrs: map[string]string{
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
+ },
+ },
+ },
+ },
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ defer f.Close()
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(*hdr, *header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ continue testLoop
+ }
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
+ }
+ }
+}
+
+func TestPartialRead(t *testing.T) {
+ f, err := os.Open("testdata/gnu.tar")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ // Read the first four bytes; Next() should skip the last byte.
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get first file: %v", err)
+ }
+ buf := make([]byte, 4)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+
+ // Second file
+ hdr, err = tr.Next()
+ if err != nil || hdr == nil {
+ t.Fatalf("Didn't get second file: %v", err)
+ }
+ buf = make([]byte, 6)
+ if _, err := io.ReadFull(tr, buf); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if expected := []byte("Google"); !bytes.Equal(buf, expected) {
+ t.Errorf("Contents = %v, want %v", buf, expected)
+ }
+}
+
+func TestIncrementalRead(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ headers := test.headers
+ cksums := test.cksums
+ nread := 0
+
+ // loop over all files
+ for ; ; nread++ {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ break
+ }
+
+ // check the header
+ if !reflect.DeepEqual(*hdr, *headers[nread]) {
+ t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
+ *hdr, headers[nread])
+ }
+
+ // read file contents in little chunks until EOF,
+ // checksumming all the way
+ h := md5.New()
+ rdbuf := make([]uint8, 8)
+ for {
+ nr, err := tr.Read(rdbuf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read: unexpected error %v\n", err)
+ break
+ }
+ h.Write(rdbuf[0:nr])
+ }
+ // verify checksum
+ have := fmt.Sprintf("%x", h.Sum(nil))
+ want := cksums[nread]
+ if want != have {
+ t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
+ }
+ }
+ if nread != len(headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
+ }
+}
+
+func TestNonSeekable(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ type readerOnly struct {
+ io.Reader
+ }
+ tr := NewReader(readerOnly{f})
+ nread := 0
+
+ for ; ; nread++ {
+ _, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ }
+
+ if nread != len(test.headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
+ }
+}
+
+func TestParsePAXHeader(t *testing.T) {
+ paxTests := [][3]string{
+ {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
+ {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
+ {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
+ for _, test := range paxTests {
+ key, expected, raw := test[0], test[1], test[2]
+ reader := bytes.NewReader([]byte(raw))
+ headers, err := parsePAX(reader)
+ if err != nil {
+ t.Errorf("Couldn't parse correctly formatted headers: %v", err)
+ continue
+ }
+ if !strings.EqualFold(headers[key], expected) {
+ t.Errorf("%s header incorrectly parsed: got %s, wanted %s", key, headers[key], expected)
+ continue
+ }
+ trailer := make([]byte, 100)
+ n, err := reader.Read(trailer)
+ if err != io.EOF || n != 0 {
+ t.Error("Buffer wasn't consumed")
+ }
+ }
+ badHeader := bytes.NewReader([]byte("3 somelongkey="))
+ if _, err := parsePAX(badHeader); err != ErrHeader {
+ t.Fatal("Unexpected success when parsing bad header")
+ }
+}
+
+func TestParsePAXTime(t *testing.T) {
+ // Some valid PAX time values
+ timestamps := map[string]time.Time{
+ "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case
+ "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
+ "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
+ "1350244992": time.Unix(1350244992, 0), // Low precision value
+ }
+ for input, expected := range timestamps {
+ ts, err := parsePAXTime(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ts.Equal(expected) {
+ t.Fatalf("Time parsing failure %s %s", ts, expected)
+ }
+ }
+}
+
+func TestMergePAX(t *testing.T) {
+ hdr := new(Header)
+ // Test a string, integer, and time based value.
+ headers := map[string]string{
+ "path": "a/b/c",
+ "uid": "1000",
+ "mtime": "1350244992.023960108",
+ }
+ err := mergePAX(hdr, headers)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &Header{
+ Name: "a/b/c",
+ Uid: 1000,
+ ModTime: time.Unix(1350244992, 23960108),
+ }
+ if !reflect.DeepEqual(hdr, want) {
+ t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
+ }
+}
+
+func TestSparseEndToEnd(t *testing.T) {
+ test := sparseTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+
+ headers := test.headers
+ cksums := test.cksums
+ nread := 0
+
+ // loop over all files
+ for ; ; nread++ {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ break
+ }
+
+ // check the header
+ if !reflect.DeepEqual(*hdr, *headers[nread]) {
+ t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
+ *hdr, headers[nread])
+ }
+
+ // read and checksum the file data
+ h := md5.New()
+ _, err = io.Copy(h, tr)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ // verify checksum
+ have := fmt.Sprintf("%x", h.Sum(nil))
+ want := cksums[nread]
+ if want != have {
+ t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
+ }
+ }
+ if nread != len(headers) {
+ t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
+ }
+}
+
+type sparseFileReadTest struct {
+ sparseData []byte
+ sparseMap []sparseEntry
+ realSize int64
+ expected []byte
+}
+
+var sparseFileReadTests = []sparseFileReadTest{
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 0, numBytes: 2},
+ {offset: 5, numBytes: 3},
+ },
+ realSize: 8,
+ expected: []byte("ab\x00\x00\x00cde"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 0, numBytes: 2},
+ {offset: 5, numBytes: 3},
+ },
+ realSize: 10,
+ expected: []byte("ab\x00\x00\x00cde\x00\x00"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 1, numBytes: 3},
+ {offset: 6, numBytes: 2},
+ },
+ realSize: 8,
+ expected: []byte("\x00abc\x00\x00de"),
+ },
+ {
+ sparseData: []byte("abcde"),
+ sparseMap: []sparseEntry{
+ {offset: 1, numBytes: 3},
+ {offset: 6, numBytes: 2},
+ },
+ realSize: 10,
+ expected: []byte("\x00abc\x00\x00de\x00\x00"),
+ },
+ {
+ sparseData: []byte(""),
+ sparseMap: nil,
+ realSize: 2,
+ expected: []byte("\x00\x00"),
+ },
+}
+
+func TestSparseFileReader(t *testing.T) {
+ for i, test := range sparseFileReadTests {
+ r := bytes.NewReader(test.sparseData)
+ nb := int64(r.Len())
+ sfr := &sparseFileReader{
+ rfr: &regFileReader{r: r, nb: nb},
+ sp: test.sparseMap,
+ pos: 0,
+ tot: test.realSize,
+ }
+ if sfr.numBytes() != nb {
+ t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
+ }
+ buf, err := ioutil.ReadAll(sfr)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ }
+ if e := test.expected; !bytes.Equal(buf, e) {
+ t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
+ }
+ if sfr.numBytes() != 0 {
+ t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
+ }
+ }
+}
+
+func TestSparseIncrementalRead(t *testing.T) {
+ sparseMap := []sparseEntry{{10, 2}}
+ sparseData := []byte("Go")
+ expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
+
+ r := bytes.NewReader(sparseData)
+ nb := int64(r.Len())
+ sfr := &sparseFileReader{
+ rfr: &regFileReader{r: r, nb: nb},
+ sp: sparseMap,
+ pos: 0,
+ tot: int64(len(expected)),
+ }
+
+ // We'll read the data 6 bytes at a time, with a hole of size 10 at
+ // the beginning and one of size 8 at the end.
+ var outputBuf bytes.Buffer
+ buf := make([]byte, 6)
+ for {
+ n, err := sfr.Read(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read: unexpected error %v\n", err)
+ }
+ if n > 0 {
+ _, err := outputBuf.Write(buf[:n])
+ if err != nil {
+ t.Errorf("Write: unexpected error %v\n", err)
+ }
+ }
+ }
+ got := outputBuf.String()
+ if got != expected {
+ t.Errorf("Contents = %v, want %v", got, expected)
+ }
+}
+
+func TestReadGNUSparseMap0x1(t *testing.T) {
+ headers := map[string]string{
+ paxGNUSparseNumBlocks: "4",
+ paxGNUSparseMap: "0,5,10,5,20,5,30,5",
+ }
+ expected := []sparseEntry{
+ {offset: 0, numBytes: 5},
+ {offset: 10, numBytes: 5},
+ {offset: 20, numBytes: 5},
+ {offset: 30, numBytes: 5},
+ }
+
+ sp, err := readGNUSparseMap0x1(headers)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if !reflect.DeepEqual(sp, expected) {
+ t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+ }
+}
+
+func TestReadGNUSparseMap1x0(t *testing.T) {
+ // This test uses lots of holes so the sparse header takes up more than two blocks
+ numEntries := 100
+ expected := make([]sparseEntry, 0, numEntries)
+ sparseMap := new(bytes.Buffer)
+
+ fmt.Fprintf(sparseMap, "%d\n", numEntries)
+ for i := 0; i < numEntries; i++ {
+ offset := int64(2048 * i)
+ numBytes := int64(1024)
+ expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
+ fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
+ }
+
+ // Make the header the smallest multiple of blockSize that fits the sparseMap
+ headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
+ bufLen := blockSize * headerBlocks
+ buf := make([]byte, bufLen)
+ copy(buf, sparseMap.Bytes())
+
+ // Get a reader to read the sparse map
+ r := bytes.NewReader(buf)
+
+ // Read the sparse map
+ sp, err := readGNUSparseMap1x0(r)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if !reflect.DeepEqual(sp, expected) {
+ t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
+ }
+}
+
+func TestUninitializedRead(t *testing.T) {
+ test := gnuTarTest
+ f, err := os.Open(test.file)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ defer f.Close()
+
+ tr := NewReader(f)
+ _, err = tr.Read([]byte{})
+ if err != io.EOF {
+ t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
new file mode 100644
index 00000000..cf9cc79c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctim.Unix())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
new file mode 100644
index 00000000..6f17dbe3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+ "syscall"
+ "time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+ return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
new file mode 100644
index 00000000..cb843db4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+ sys, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+ h.Uid = int(sys.Uid)
+ h.Gid = int(sys.Gid)
+ // TODO(bradfitz): populate username & group. os/user
+ // doesn't cache LookupId lookups, and lacks group
+ // lookup functions.
+ h.AccessTime = statAtime(sys)
+ h.ChangeTime = statCtime(sys)
+ // TODO(bradfitz): major/minor device numbers?
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
new file mode 100644
index 00000000..ed333f3e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
@@ -0,0 +1,284 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestFileInfoHeader(t *testing.T) {
+ fi, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "small.txt"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(5); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+ // FileInfoHeader should error when passing nil FileInfo
+ if _, err := FileInfoHeader(nil, ""); err == nil {
+ t.Fatalf("Expected error when passing nil to FileInfoHeader")
+ }
+}
+
+func TestFileInfoHeaderDir(t *testing.T) {
+ fi, err := os.Stat("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ if g, e := h.Name, "testdata/"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ // Ignoring c_ISGID for golang.org/issue/4867
+ if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
+ t.Errorf("Mode = %#o; want %#o", g, e)
+ }
+ if g, e := h.Size, int64(0); g != e {
+ t.Errorf("Size = %v; want %v", g, e)
+ }
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
+ t.Errorf("ModTime = %v; want %v", g, e)
+ }
+}
+
+func TestFileInfoHeaderSymlink(t *testing.T) {
+ h, err := FileInfoHeader(symlink{}, "some-target")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, e := h.Name, "some-symlink"; g != e {
+ t.Errorf("Name = %q; want %q", g, e)
+ }
+ if g, e := h.Linkname, "some-target"; g != e {
+ t.Errorf("Linkname = %q; want %q", g, e)
+ }
+}
+
+type symlink struct{}
+
+func (symlink) Name() string { return "some-symlink" }
+func (symlink) Size() int64 { return 0 }
+func (symlink) Mode() os.FileMode { return os.ModeSymlink }
+func (symlink) ModTime() time.Time { return time.Time{} }
+func (symlink) IsDir() bool { return false }
+func (symlink) Sys() interface{} { return nil }
+
+func TestRoundTrip(t *testing.T) {
+ data := []byte("some file contents")
+
+ var b bytes.Buffer
+ tw := NewWriter(&b)
+ hdr := &Header{
+ Name: "file.txt",
+ Uid: 1 << 21, // too big for 8 octal digits
+ Size: int64(len(data)),
+ ModTime: time.Now(),
+ }
+ // tar only supports second precision.
+ hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("tw.WriteHeader: %v", err)
+ }
+ if _, err := tw.Write(data); err != nil {
+ t.Fatalf("tw.Write: %v", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("tw.Close: %v", err)
+ }
+
+ // Read it back.
+ tr := NewReader(&b)
+ rHdr, err := tr.Next()
+ if err != nil {
+ t.Fatalf("tr.Next: %v", err)
+ }
+ if !reflect.DeepEqual(rHdr, hdr) {
+ t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
+ }
+ rData, err := ioutil.ReadAll(tr)
+ if err != nil {
+ t.Fatalf("Read: %v", err)
+ }
+ if !bytes.Equal(rData, data) {
+ t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
+ }
+}
+
+type headerRoundTripTest struct {
+ h *Header
+ fm os.FileMode
+}
+
+func TestHeaderRoundTrip(t *testing.T) {
+ golden := []headerRoundTripTest{
+ // regular file.
+ {
+ h: &Header{
+ Name: "test.txt",
+ Mode: 0644 | c_ISREG,
+ Size: 12,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0644,
+ },
+ // hard link.
+ {
+ h: &Header{
+ Name: "hard.txt",
+ Mode: 0644 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600916, 0),
+ Typeflag: TypeLink,
+ },
+ fm: 0644 | os.ModeSymlink,
+ },
+ // symbolic link.
+ {
+ h: &Header{
+ Name: "link.txt",
+ Mode: 0777 | c_ISLNK,
+ Size: 0,
+ ModTime: time.Unix(1360600852, 0),
+ Typeflag: TypeSymlink,
+ },
+ fm: 0777 | os.ModeSymlink,
+ },
+ // character device node.
+ {
+ h: &Header{
+ Name: "dev/null",
+ Mode: 0666 | c_ISCHR,
+ Size: 0,
+ ModTime: time.Unix(1360578951, 0),
+ Typeflag: TypeChar,
+ },
+ fm: 0666 | os.ModeDevice | os.ModeCharDevice,
+ },
+ // block device node.
+ {
+ h: &Header{
+ Name: "dev/sda",
+ Mode: 0660 | c_ISBLK,
+ Size: 0,
+ ModTime: time.Unix(1360578954, 0),
+ Typeflag: TypeBlock,
+ },
+ fm: 0660 | os.ModeDevice,
+ },
+ // directory.
+ {
+ h: &Header{
+ Name: "dir/",
+ Mode: 0755 | c_ISDIR,
+ Size: 0,
+ ModTime: time.Unix(1360601116, 0),
+ Typeflag: TypeDir,
+ },
+ fm: 0755 | os.ModeDir,
+ },
+ // fifo node.
+ {
+ h: &Header{
+ Name: "dev/initctl",
+ Mode: 0600 | c_ISFIFO,
+ Size: 0,
+ ModTime: time.Unix(1360578949, 0),
+ Typeflag: TypeFifo,
+ },
+ fm: 0600 | os.ModeNamedPipe,
+ },
+ // setuid.
+ {
+ h: &Header{
+ Name: "bin/su",
+ Mode: 0755 | c_ISREG | c_ISUID,
+ Size: 23232,
+ ModTime: time.Unix(1355405093, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0755 | os.ModeSetuid,
+ },
+ // setguid.
+ {
+ h: &Header{
+ Name: "group.txt",
+ Mode: 0750 | c_ISREG | c_ISGID,
+ Size: 0,
+ ModTime: time.Unix(1360602346, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0750 | os.ModeSetgid,
+ },
+ // sticky.
+ {
+ h: &Header{
+ Name: "sticky.txt",
+ Mode: 0600 | c_ISREG | c_ISVTX,
+ Size: 7,
+ ModTime: time.Unix(1360602540, 0),
+ Typeflag: TypeReg,
+ },
+ fm: 0600 | os.ModeSticky,
+ },
+ }
+
+ for i, g := range golden {
+ fi := g.h.FileInfo()
+ h2, err := FileInfoHeader(fi, "")
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if strings.Contains(fi.Name(), "/") {
+ t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
+ }
+ name := path.Base(g.h.Name)
+ if fi.IsDir() {
+ name += "/"
+ }
+ if got, want := h2.Name, name; got != want {
+ t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Size, g.h.Size; got != want {
+ t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
+ }
+ if got, want := h2.Mode, g.h.Mode; got != want {
+ t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := fi.Mode(), g.fm; got != want {
+ t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
+ }
+ if got, want := h2.ModTime, g.h.ModTime; got != want {
+ t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
+ }
+ if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
+ t.Errorf("i=%d: Sys didn't return original *Header", i)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
new file mode 100644
index 00000000..fc899dc8
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
new file mode 100644
index 00000000..cc9cfaa3
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
new file mode 100644
index 00000000..9bc24b65
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
new file mode 100644
index 00000000..b249bfc5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
@@ -0,0 +1 @@
+Kilts
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
new file mode 100644
index 00000000..394ee3ec
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
@@ -0,0 +1 @@
+Google.com
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
new file mode 100644
index 00000000..8bd4e74d
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
new file mode 100644
index 00000000..59e2d4e6
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
new file mode 100644
index 00000000..29679d9a
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
new file mode 100644
index 00000000..eb65fc94
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
new file mode 100644
index 00000000..5960ee82
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
new file mode 100644
index 00000000..753e883c
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
new file mode 100644
index 00000000..e6d816ad
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
new file mode 100644
index 00000000..9701950e
Binary files /dev/null and b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar differ
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
new file mode 100644
index 00000000..dafb2cab
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -0,0 +1,396 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ ErrWriteTooLong = errors.New("archive/tar: write too long")
+ ErrFieldTooLong = errors.New("archive/tar: header field too long")
+ ErrWriteAfterClose = errors.New("archive/tar: write after close")
+ errNameTooLong = errors.New("archive/tar: name too long")
+ errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
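+//
+// A minimal usage sketch (w is any io.Writer and data is the file payload,
+// both hypothetical here):
+//
+//	tw := NewWriter(w)
+//	hdr := &Header{Name: "file.txt", Mode: 0600, Size: int64(len(data))}
+//	if err := tw.WriteHeader(hdr); err != nil {
+//		// handle error
+//	}
+//	if _, err := tw.Write(data); err != nil {
+//		// handle error
+//	}
+//	if err := tw.Close(); err != nil {
+//		// handle error
+//	}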
+type Writer struct {
+ w io.Writer
+ err error
+ nb int64 // number of unwritten bytes for current file entry
+ pad int64 // amount of padding to write after current file entry
+ closed bool
+ usedBinary bool // whether the binary numeric field extension was used
+ preferPax bool // use pax header instead of binary numeric header
+ hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
+ paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+ if tw.nb > 0 {
+ tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+ return tw.err
+ }
+
+ n := tw.nb + tw.pad
+ for n > 0 && tw.err == nil {
+ nr := n
+ if nr > blockSize {
+ nr = blockSize
+ }
+ var nw int
+ nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+ n -= int64(nw)
+ }
+ tw.nb = 0
+ tw.pad = 0
+ return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+// If the value is too long for the field and allowPax is true, add a PAX header record instead.
+func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
+ if needsPaxHeader {
+ paxHeaders[paxKeyword] = s
+ return
+ }
+ if len(s) > len(b) {
+ if tw.err == nil {
+ tw.err = ErrFieldTooLong
+ }
+ return
+ }
+ ascii := toASCII(s)
+ copy(b, ascii)
+ if len(ascii) < len(b) {
+ b[len(ascii)] = 0
+ }
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (tw *Writer) octal(b []byte, x int64) {
+ s := strconv.FormatInt(x, 8)
+ // leading zeros, but leave room for a NUL.
+ for len(s)+1 < len(b) {
+ s = "0" + s
+ }
+ tw.cString(b, s, false, paxNone, nil)
+}
+
+// Write x into b, either as octal or as binary (GNUtar/star extension).
+// If the value is too long for the field and PAX is both allowed for the field and preferred, add a PAX header record instead.
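+// For example, assuming a zero-initialized 8-byte field, the value 1<<33 is
+// encoded in binary form as the bytes 0x80 0x00 0x00 0x02 0x00 0x00 0x00 0x00,
+// where the high bit of the first byte marks the binary format.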
+func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
+ // Try octal first.
+ s := strconv.FormatInt(x, 8)
+ if len(s) < len(b) {
+ tw.octal(b, x)
+ return
+ }
+
+ // If it is too long for octal, and pax is preferred, use a pax header
+ if allowPax && tw.preferPax {
+ tw.octal(b, 0)
+ s := strconv.FormatInt(x, 10)
+ paxHeaders[paxKeyword] = s
+ return
+ }
+
+ // Too big: use binary (big-endian).
+ tw.usedBinary = true
+ for i := len(b) - 1; x > 0 && i >= 0; i-- {
+ b[i] = byte(x)
+ x >>= 8
+ }
+ b[0] |= 0x80 // highest bit indicates binary format
+}
+
+var (
+ minTime = time.Unix(0, 0)
+ // There is room for 11 octal digits (33 bits) of mtime.
+ maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+ return tw.writeHeader(hdr, true)
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// This method is also called internally by writePAXHeader, with allowPax set
+// to false to suppress writing a further PAX header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+ if tw.closed {
+ return ErrWriteAfterClose
+ }
+ if tw.err == nil {
+ tw.Flush()
+ }
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // a map to hold pax header records, if any are needed
+ paxHeaders := make(map[string]string)
+
+ // TODO(shanemhansen): we might want to use PAX headers for
+ // subsecond time resolution, but for now let's just capture
+ // too long fields or non ascii characters
+
+ var header []byte
+
+ // We need to select which scratch buffer to use carefully,
+ // since this method is called recursively to write PAX headers.
+ // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+ // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+ // already being used by the non-recursive call, so we must use paxHdrBuff.
+ header = tw.hdrBuff[:]
+ if !allowPax {
+ header = tw.paxHdrBuff[:]
+ }
+ copy(header, zeroBlock)
+ s := slicer(header)
+
+ // keep a reference to the filename so we can overwrite it later if we detect that we can use ustar longnames instead of pax
+ pathHeaderBytes := s.next(fileNameSize)
+
+ tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
+
+ // Handle out of range ModTime carefully.
+ var modTime int64
+ if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
+ modTime = hdr.ModTime.Unix()
+ }
+
+ tw.octal(s.next(8), hdr.Mode) // 100:108
+ tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
+ tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
+ tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
+ tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
+ s.next(8) // chksum (148:156)
+ s.next(1)[0] = hdr.Typeflag // 156:157
+
+ tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
+
+ copy(s.next(8), []byte("ustar\x0000")) // 257:265
+ tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
+ tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
+ tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
+ tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
+
+ // keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar longnames instead of pax
+ prefixHeaderBytes := s.next(155)
+ tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
+
+ // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
+ if tw.usedBinary {
+ copy(header[257:265], []byte("ustar \x00"))
+ }
+
+ _, paxPathUsed := paxHeaders[paxPath]
+ // try to use a ustar header when only the name is too long
+ if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+ suffix := hdr.Name
+ prefix := ""
+ if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
+ var err error
+ prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
+ if err == nil {
+ // ok we can use a ustar long name instead of pax, now correct the fields
+
+ // remove the path field from the pax header. this will suppress the pax header
+ delete(paxHeaders, paxPath)
+
+ // update the path fields
+ tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
+ tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
+
+ // Use the ustar magic if we used ustar long names.
+ if len(prefix) > 0 && !tw.usedBinary {
+ copy(header[257:265], []byte("ustar\x00"))
+ }
+ }
+ }
+ }
+
+ // The chksum field is terminated by a NUL and a space.
+ // This is different from the other octal fields.
+ chksum, _ := checksum(header)
+ tw.octal(header[148:155], chksum)
+ header[155] = ' '
+
+ if tw.err != nil {
+ // problem with header; probably integer too big for a field.
+ return tw.err
+ }
+
+ if allowPax {
+ for k, v := range hdr.Xattrs {
+ paxHeaders[paxXattr+k] = v
+ }
+ }
+
+ if len(paxHeaders) > 0 {
+ if !allowPax {
+ return errInvalidHeader
+ }
+ if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+ return err
+ }
+ }
+ tw.nb = int64(hdr.Size)
+ tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+ _, tw.err = tw.w.Write(header)
+ return tw.err
+}
+
+// splitUSTARLongName splits a USTAR long name into a prefix and a suffix.
+// name must be < 256 characters. errNameTooLong is returned
+// if name can't be split. The splitting heuristic
+// is compatible with GNU tar.
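+// For example (a hypothetical path), "dir1/dir2/file.txt" would be split into
+// the prefix "dir1/dir2" and the suffix "file.txt".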
+func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
+ length := len(name)
+ if length > fileNamePrefixSize+1 {
+ length = fileNamePrefixSize + 1
+ } else if name[length-1] == '/' {
+ length--
+ }
+ i := strings.LastIndex(name[:length], "/")
+ // nlen contains the resulting length in the name field.
+ // plen contains the resulting length in the prefix field.
+ nlen := len(name) - i - 1
+ plen := i
+ if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+ err = errNameTooLong
+ return
+ }
+ prefix, suffix = name[:i], name[i+1:]
+ return
+}
+
+// writePAXHeader writes an extended pax header to the archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+ // Prepare extended header
+ ext := new(Header)
+ ext.Typeflag = TypeXHeader
+ // Setting ModTime is required for reader parsing to
+ // succeed, and seems harmless enough.
+ ext.ModTime = hdr.ModTime
+ // The spec asks that we namespace our pseudo files
+ // with the current pid.
+ pid := os.Getpid()
+ dir, file := path.Split(hdr.Name)
+ fullName := path.Join(dir,
+ fmt.Sprintf("PaxHeaders.%d", pid), file)
+
+ ascii := toASCII(fullName)
+ if len(ascii) > 100 {
+ ascii = ascii[:100]
+ }
+ ext.Name = ascii
+ // Construct the body
+ var buf bytes.Buffer
+
+ for k, v := range paxHeaders {
+ fmt.Fprint(&buf, paxHeader(k+"="+v))
+ }
+
+ ext.Size = int64(len(buf.Bytes()))
+ if err := tw.writeHeader(ext, false); err != nil {
+ return err
+ }
+ if _, err := tw.Write(buf.Bytes()); err != nil {
+ return err
+ }
+ if err := tw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// paxHeader formats a single pax record, prefixing it with the appropriate length
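+// For example, paxHeader("path=/etc/hosts") returns "19 path=/etc/hosts\n",
+// where the leading 19 counts every byte of the record, including the length
+// digits themselves.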
+func paxHeader(msg string) string {
+ const padding = 2 // Extra padding for space and newline
+ size := len(msg) + padding
+ size += len(strconv.Itoa(size))
+ record := fmt.Sprintf("%d %s\n", size, msg)
+ if len(record) != size {
+ // Final adjustment if adding size increased
+ // the number of digits in size
+ size = len(record)
+ record = fmt.Sprintf("%d %s\n", size, msg)
+ }
+ return record
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+ if tw.closed {
+ err = ErrWriteTooLong
+ return
+ }
+ overwrite := false
+ if int64(len(b)) > tw.nb {
+ b = b[0:tw.nb]
+ overwrite = true
+ }
+ n, err = tw.w.Write(b)
+ tw.nb -= int64(n)
+ if err == nil && overwrite {
+ err = ErrWriteTooLong
+ return
+ }
+ tw.err = err
+ return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+ if tw.err != nil || tw.closed {
+ return tw.err
+ }
+ tw.Flush()
+ tw.closed = true
+ if tw.err != nil {
+ return tw.err
+ }
+
+ // trailer: two zero blocks
+ for i := 0; i < 2; i++ {
+ _, tw.err = tw.w.Write(zeroBlock)
+ if tw.err != nil {
+ break
+ }
+ }
+ return tw.err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
new file mode 100644
index 00000000..5e42e322
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
@@ -0,0 +1,491 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/iotest"
+ "time"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ // The writer test file was produced with this command:
+ // tar (GNU tar) 1.26
+ // ln -s small.txt link.txt
+ // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
+ {
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ ModTime: time.Unix(1246508266, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ {
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ ModTime: time.Unix(1245217492, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ {
+ header: &Header{
+ Name: "link.txt",
+ Mode: 0777,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 0,
+ ModTime: time.Unix(1314603082, 0),
+ Typeflag: '2',
+ Linkname: "small.txt",
+ Uname: "strings",
+ Gname: "strings",
+ },
+ // no contents
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ {
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ ModTime: time.Unix(1254699560, 0),
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // fake contents
+ contents: strings.Repeat("\x00", 4<<10),
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
+ // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
+ {
+ file: "testdata/writer-big-long.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: strings.Repeat("longname/", 15) + "16gig.txt",
+ Mode: 0644,
+ Uid: 1000,
+ Gid: 1000,
+ Size: 16 << 30,
+ ModTime: time.Unix(1399583047, 0),
+ Typeflag: '0',
+ Uname: "guillaume",
+ Gname: "guillaume",
+ },
+ // fake contents
+ contents: strings.Repeat("\x00", 4<<10),
+ },
+ },
+ },
+ // This file was produced using gnu tar 1.17
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
+ {
+ file: "testdata/ustar.tar",
+ entries: []*writerTestEntry{
+ {
+ header: &Header{
+ Name: strings.Repeat("longname/", 15) + "file.txt",
+ Mode: 0644,
+ Uid: 0765,
+ Gid: 024,
+ Size: 06,
+ ModTime: time.Unix(1360135598, 0),
+ Typeflag: '0',
+ Uname: "shane",
+ Gname: "staff",
+ },
+ contents: "hello\n",
+ },
+ },
+ },
+}
+
+// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
+func bytestr(offset int, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("%04x ", offset)
+ for _, ch := range b {
+ switch {
+ case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
+ s += fmt.Sprintf(" %c", ch)
+ default:
+ s += fmt.Sprintf(" %02x", ch)
+ }
+ }
+ return s
+}
+
+// Render a pseudo-diff between two blocks of bytes.
+func bytediff(a []byte, b []byte) string {
+ const rowLen = 32
+ s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
+ for offset := 0; len(a)+len(b) > 0; offset += rowLen {
+ na, nb := rowLen, rowLen
+ if na > len(a) {
+ na = len(a)
+ }
+ if nb > len(b) {
+ nb = len(b)
+ }
+ sa := bytestr(offset, a[0:na])
+ sb := bytestr(offset, b[0:nb])
+ if sa != sb {
+ s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
+ }
+ a = a[na:]
+ b = b[nb:]
+ }
+ return s
+}
+
+func TestWriter(t *testing.T) {
+testLoop:
+ for i, test := range writerTests {
+ expected, err := ioutil.ReadFile(test.file)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+
+ buf := new(bytes.Buffer)
+ tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
+ big := false
+ for j, entry := range test.entries {
+ big = big || entry.header.Size > 1<<10
+ if err := tw.WriteHeader(entry.header); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
+ continue testLoop
+ }
+ if _, err := io.WriteString(tw, entry.contents); err != nil {
+ t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
+ continue testLoop
+ }
+ }
+ // Only interested in Close failures for the small tests.
+ if err := tw.Close(); err != nil && !big {
+ t.Errorf("test %d: Failed closing archive: %v", i, err)
+ continue testLoop
+ }
+
+ actual := buf.Bytes()
+ if !bytes.Equal(expected, actual) {
+ t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
+ i, bytediff(expected, actual))
+ }
+ if testing.Short() { // The second test is expensive.
+ break
+ }
+ }
+}
+
+func TestPax(t *testing.T) {
+ // Create an archive with a large name
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ // Force a PAX long name to be written
+ longName := strings.Repeat("ab", 100)
+ contents := strings.Repeat(" ", int(hdr.Size))
+ hdr.Name = longName
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long file name")
+ }
+}
+
+func TestPaxSymlink(t *testing.T) {
+ // Create an archive with a large linkname
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ hdr.Typeflag = TypeSymlink
+ // Force a PAX long linkname to be written
+ longLinkname := strings.Repeat("1234567890/1234567890", 10)
+ hdr.Linkname = longLinkname
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Linkname != longLinkname {
+ t.Fatal("Couldn't recover long link name")
+ }
+}
+
+func TestPaxNonAscii(t *testing.T) {
+ // Create an archive with non ascii. These should trigger a pax header
+ // because pax headers have a defined utf-8 encoding.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat:1 %v", err)
+ }
+
+ // some sample data
+ chineseFilename := "文件名"
+ chineseGroupname := "組"
+ chineseUsername := "用戶名"
+
+ hdr.Name = chineseFilename
+ hdr.Gname = chineseGroupname
+ hdr.Uname = chineseUsername
+
+ contents := strings.Repeat(" ", int(hdr.Size))
+
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Simple test to make sure PAX extensions are in effect
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
+ t.Fatal("Expected at least one PAX header to be written.")
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != chineseFilename {
+ t.Fatal("Couldn't recover unicode name")
+ }
+ if hdr.Gname != chineseGroupname {
+ t.Fatal("Couldn't recover unicode group")
+ }
+ if hdr.Uname != chineseUsername {
+ t.Fatal("Couldn't recover unicode user")
+ }
+}
+
+func TestPaxXattrs(t *testing.T) {
+ xattrs := map[string]string{
+ "user.key": "value",
+ }
+
+ // Create an archive with an xattr
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("os.Stat: %v", err)
+ }
+ contents := "Kilts"
+ hdr.Xattrs = xattrs
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = writer.Write([]byte(contents)); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get the xattrs back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
+ t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
+ hdr.Xattrs, xattrs)
+ }
+}
+
+func TestPAXHeader(t *testing.T) {
+ medName := strings.Repeat("CD", 50)
+ longName := strings.Repeat("AB", 100)
+ paxTests := [][2]string{
+ {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
+ {"a=b", "6 a=b\n"}, // Single digit length
+ {"a=names", "11 a=names\n"}, // Test case involving carries
+ {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
+ {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
+
+ for _, test := range paxTests {
+ key, expected := test[0], test[1]
+ if result := paxHeader(key); result != expected {
+ t.Fatalf("paxHeader: got %s, expected %s", result, expected)
+ }
+ }
+}
+
+func TestUSTARLongName(t *testing.T) {
+ // Create an archive with a path that failed to split with USTAR extension in previous versions.
+ fileinfo, err := os.Stat("testdata/small.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdr, err := FileInfoHeader(fileinfo, "")
+ if err != nil {
+ t.Fatalf("FileInfoHeader: %v", err)
+ }
+ hdr.Typeflag = TypeDir
+ // Force a PAX long name to be written. The name was taken from a practical
+ // example that failed, with every character replaced by a digit to anonymize
+ // the sample.
+ longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
+ hdr.Name = longName
+
+ hdr.Size = 0
+ var buf bytes.Buffer
+ writer := NewWriter(&buf)
+ if err := writer.WriteHeader(hdr); err != nil {
+ t.Fatal(err)
+ }
+ if err := writer.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Test that we can get a long name back out of the archive.
+ reader := NewReader(&buf)
+ hdr, err = reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name != longName {
+ t.Fatal("Couldn't recover long name")
+ }
+}
+
+func TestValidTypeflagWithPAXHeader(t *testing.T) {
+ var buffer bytes.Buffer
+ tw := NewWriter(&buffer)
+
+ fileName := strings.Repeat("ab", 100)
+
+ hdr := &Header{
+ Name: fileName,
+ Size: 4,
+ Typeflag: 0,
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ t.Fatalf("Failed to write header: %s", err)
+ }
+ if _, err := tw.Write([]byte("fooo")); err != nil {
+ t.Fatalf("Failed to write the file's data: %s", err)
+ }
+ tw.Close()
+
+ tr := NewReader(&buffer)
+
+ for {
+ header, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatalf("Failed to read header: %s", err)
+ }
+ if header.Typeflag != 0 {
+ t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md
new file mode 100644
index 00000000..05be0f8a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to libtrust
+
+Want to hack on libtrust? Awesome! Here are instructions to get you
+started.
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contribution guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE
new file mode 100644
index 00000000..27448585
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 00000000..9768175f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md
new file mode 100644
index 00000000..8e7db381
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control are managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
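+## Example
+
+A minimal sketch of generating a key and a self-signed server certificate with
+this package (the host name and address are illustrative; assumes the `net`
+and `libtrust` packages are imported):
+
+```go
+key, err := libtrust.GenerateECP256PrivateKey()
+if err != nil {
+	// handle error
+}
+cert, err := libtrust.GenerateSelfSignedServerCert(
+	key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+)
+if err != nil {
+	// handle error
+}
+_ = cert // use the *x509.Certificate in a TLS server configuration
+```
+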
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go
new file mode 100644
index 00000000..3dcca33c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "time"
+)
+
+type certTemplateInfo struct {
+ commonName string
+ domains []string
+ ipAddresses []net.IP
+ isCA bool
+ clientAuth bool
+ serverAuth bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+ // Generate a certificate template which is valid from the past week to
+ // 10 years from now. The usage of the certificate depends on the
+ // specified fields in the given certTemplateInfo object.
+ var (
+ keyUsage x509.KeyUsage
+ extKeyUsage []x509.ExtKeyUsage
+ )
+
+ if info.isCA {
+ keyUsage = x509.KeyUsageCertSign
+ }
+
+ if info.clientAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+ }
+
+ if info.serverAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+ }
+
+ return &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: info.commonName,
+ },
+ NotBefore: time.Now().Add(-time.Hour * 24 * 7),
+ NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
+ DNSNames: info.domains,
+ IPAddresses: info.ipAddresses,
+ IsCA: info.isCA,
+ KeyUsage: keyUsage,
+ ExtKeyUsage: extKeyUsage,
+ BasicConstraintsValid: info.isCA,
+ }
+}
+
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+ pubCertTemplate := generateCertTemplate(subInfo)
+ privCertTemplate := generateCertTemplate(issInfo)
+
+ certDER, err := x509.CreateCertificate(
+ rand.Reader, pubCertTemplate, privCertTemplate,
+ pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create certificate: %s", err)
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %s", err)
+ }
+
+ return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ domains: domains,
+ ipAddresses: ipAddresses,
+ serverAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ clientAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+ subjectInfo := &certTemplateInfo{
+ commonName: trustedKey.KeyID(),
+ isCA: true,
+ }
+ issuerInfo := &certTemplateInfo{
+ commonName: signer.KeyID(),
+ }
+
+ return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration. Any self-signed certificates issued by the specified
+// trusted keys will be verified during a TLS handshake.
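+// A typical use (assuming the standard crypto/tls package) is to install the
+// returned pool as the RootCAs of a tls.Config, e.g. &tls.Config{RootCAs: pool}.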
+func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+
+ for _, trustedKey := range trustedKeys {
+ cert, err := GenerateCACert(signer, trustedKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
+ }
+
+ certPool.AddCert(cert)
+ }
+
+ return certPool, nil
+}
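+
+// Illustrative sketch (assumes crypto/tls; serverKey and trustedClientKeys
+// are hypothetical values): a pool generated for trusted client keys can
+// back mutual-TLS verification on a server.
+//
+//	pool, _ := GenerateCACertPool(serverKey, trustedClientKeys)
+//	tlsConfig := &tls.Config{
+//		ClientAuth: tls.RequireAndVerifyClientCert,
+//		ClientCAs:  pool,
+//	}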
+
+// LoadCertificateBundle loads certificates from the given file. The file
+// should be PEM encoded and contain one or more certificates, each with the
+// PEM type "CERTIFICATE".
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ certificates := []*x509.Certificate{}
+ var block *pem.Block
+ block, b = pem.Decode(b)
+ for ; block != nil; block, b = pem.Decode(b) {
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ } else {
+ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
+ }
+ }
+
+ return certificates, nil
+}
+
+// LoadCertificatePool loads a CA pool from the given file. The file should
+// be PEM encoded and contain one or more certificates, each with the PEM
+// type "CERTIFICATE".
+func LoadCertificatePool(filename string) (*x509.CertPool, error) {
+ certs, err := LoadCertificateBundle(filename)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
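+
+// Illustrative sketch ("ca.pem" is a hypothetical path; assumes crypto/tls):
+// loading a PEM bundle from disk into a root CA pool for a TLS client.
+//
+//	pool, _ := LoadCertificatePool("ca.pem")
+//	tlsConfig := &tls.Config{RootCAs: pool}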
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go
new file mode 100644
index 00000000..c111f353
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go
@@ -0,0 +1,111 @@
+package libtrust
+
+import (
+ "encoding/pem"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "testing"
+)
+
+func TestGenerateCertificates(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = GenerateSelfSignedClientCert(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGenerateCACertPool(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ caKey1, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ caKey2, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()})
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestLoadCertificates(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ caKey1, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ caKey2, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cert1, err := GenerateCACert(caKey1, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert2, err := GenerateCACert(caKey2, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ d, err := ioutil.TempDir("/tmp", "cert-test")
+ if err != nil {
+ t.Fatal(err)
+	}
+	defer os.RemoveAll(d)
+ caFile := path.Join(d, "ca.pem")
+ f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw})
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw})
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ certs, err := LoadCertificateBundle(caFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(certs) != 2 {
+ t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs))
+ }
+
+ pool, err := LoadCertificatePool(caFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(pool.Subjects()) != 2 {
+ t.Fatalf("Invalid certificate pool")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go
new file mode 100644
index 00000000..ec5d2159
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go
@@ -0,0 +1,9 @@
+/*
+Package libtrust provides an interface for managing authentication and
+authorization using public key cryptography. Authentication is handled
+using the identity attached to the public key and verified through TLS
+x509 certificates, a key challenge, or a signature. Authorization and
+access control are managed through a trust graph distributed between
+both remote trust servers and locally cached and managed data.
+*/
+package libtrust
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go
new file mode 100644
index 00000000..00bbe4b3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go
@@ -0,0 +1,428 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["crv"] = k.CurveName()
+
+ xBytes := k.X.Bytes()
+ yBytes := k.Y.Bytes()
+ octetLength := (k.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output so that x, y are each
+ // *octetLength* bytes long.
+ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+ xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...)
+
+ jwk["x"] = joseBase64UrlEncode(xBuf)
+ jwk["y"] = joseBase64UrlEncode(yBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
+ // consistency.
+
+ // Get the curve identifier value.
+ crv, err := stringFromMap(jwk, "crv")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+ }
+
+ var (
+ curve elliptic.Curve
+ sigAlg *signatureAlgorithm
+ )
+
+ switch {
+ case crv == "P-256":
+ curve = elliptic.P256()
+ sigAlg = es256
+ case crv == "P-384":
+ curve = elliptic.P384()
+ sigAlg = es384
+ case crv == "P-521":
+ curve = elliptic.P521()
+ sigAlg = es512
+ default:
+		return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q", crv)
+ }
+
+ // Get the X and Y coordinates for the public key point.
+ xB64Url, err := stringFromMap(jwk, "x")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+ x, err := parseECCoordinate(xB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+
+ yB64Url, err := stringFromMap(jwk, "y")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+ y, err := parseECCoordinate(yB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+
+ key := &ecPublicKey{
+ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+ curveName: crv, signatureAlgorithm: sigAlg,
+ }
+
+ // Key ID is optional too, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+	// The given hashID is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
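+
+// Illustrative sketch of a sign/verify round trip (assumes the strings
+// package; error handling elided). As noted above, the hash argument is
+// advisory for EC keys.
+//
+//	key, _ := GenerateECP256PrivateKey()
+//	sig, alg, _ := key.Sign(strings.NewReader("payload"), crypto.SHA256)
+//	err := key.PublicKey().Verify(strings.NewReader("payload"), alg, sig)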
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded SEC 1 format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
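+
+// Illustrative sketch: generated keys round-trip through the JWK JSON
+// encoding via this package's UnmarshalPrivateKeyJWK (error handling
+// elided).
+//
+//	key, _ := GenerateECP256PrivateKey()
+//	jwkJSON, _ := json.Marshal(key)
+//	key2, err := UnmarshalPrivateKeyJWK(jwkJSON)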
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go
new file mode 100644
index 00000000..26ac3814
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go
@@ -0,0 +1,157 @@
+package libtrust
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+)
+
+func generateECTestKeys(t *testing.T) []PrivateKey {
+ p256Key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p384Key, err := GenerateECP384PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p521Key, err := GenerateECP521PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return []PrivateKey{p256Key, p384Key, p521Key}
+}
+
+func TestECKeys(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+
+ for _, ecKey := range ecKeys {
+ if ecKey.KeyType() != "EC" {
+ t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType())
+ }
+ }
+}
+
+func TestECSignVerify(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+
+ message := "Hello, World!"
+ data := bytes.NewReader([]byte(message))
+
+ sigAlgs := []*signatureAlgorithm{es256, es384, es512}
+
+ for i, ecKey := range ecKeys {
+ sigAlg := sigAlgs[i]
+
+ t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID())
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Sign
+ sig, alg, err := ecKey.Sign(data, sigAlg.HashID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Verify
+ err = ecKey.Verify(data, alg, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalECKeys(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+ data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test."))
+ sigAlgs := []*signatureAlgorithm{es256, es384, es512}
+
+ for i, ecKey := range ecKeys {
+ sigAlg := sigAlgs[i]
+ privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Logf("JWK Private Key: %s", string(privateJWKJSON))
+ t.Logf("JWK Public Key: %s", string(publicJWKJSON))
+
+ privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure we can sign/verify a message with the unmarshalled keys.
+ data.Seek(0, 0) // Reset the byte reader
+ signature, alg, err := privKey2.Sign(data, sigAlg.HashID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data.Seek(0, 0) // Reset the byte reader
+ err = pubKey2.Verify(data, alg, signature)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestFromCryptoECKeys(t *testing.T) {
+ ecKeys := generateECTestKeys(t)
+
+ for _, ecKey := range ecKeys {
+ cryptoPrivateKey := ecKey.CryptoPrivateKey()
+ cryptoPublicKey := ecKey.CryptoPublicKey()
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if pubKey.KeyID() != ecKey.KeyID() {
+ t.Fatal("public key key ID mismatch")
+ }
+
+ privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if privKey.KeyID() != ecKey.KeyID() {
+ t.Fatal("public key key ID mismatch")
+ }
+ }
+}
+
+func TestExtendedFields(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key.AddExtendedField("test", "foobar")
+ val := key.GetExtendedField("test")
+
+ gotVal, ok := val.(string)
+ if !ok {
+ t.Fatalf("value is not a string")
+	} else if gotVal != "foobar" {
+		t.Fatalf("value %q is not equal to %q", gotVal, "foobar")
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go
new file mode 100644
index 00000000..5b2b4fca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+ "path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ continue
+ }
+ }
+ }
+
+ return filtered, nil
+}
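+
+// Illustrative sketch: keep only the keys whose "hosts" field matches a
+// given registry host, also returning keys with no hosts entry at all.
+//
+//	matched, err := FilterByHosts(keys, "registry.example.com", true)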
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go
new file mode 100644
index 00000000..997e554c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go
@@ -0,0 +1,81 @@
+package libtrust
+
+import (
+ "testing"
+)
+
+func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) {
+ if len(sliceA) != len(sliceB) {
+ t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB))
+ }
+
+ for i, itemA := range sliceA {
+ itemB := sliceB[i]
+ if itemA != itemB {
+ t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB)
+ }
+ }
+}
+
+func TestFilter(t *testing.T) {
+ keys := make([]PublicKey, 0, 8)
+
+ // Create 8 keys and add host entries.
+ for i := 0; i < cap(keys); i++ {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+		// We use both []interface{} and []string here because JWT uses the
+		// []interface{} format, while PEM uses []string.
+ switch {
+ case i == 0:
+ // Don't add entries for this key, key 0.
+ break
+ case i%2 == 0:
+ // Should catch keys 2, 4, and 6.
+ key.AddExtendedField("hosts", []interface{}{"*.even.example.com"})
+ case i == 7:
+ // Should catch only the last key, and make it match any hostname.
+ key.AddExtendedField("hosts", []string{"*"})
+ default:
+			// Should catch keys 1, 3, and 5.
+ key.AddExtendedField("hosts", []string{"*.example.com"})
+ }
+
+ keys = append(keys, key)
+ }
+
+ // Should match 2 keys, the empty one, and the one that matches all hosts.
+ matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedMatch := []PublicKey{keys[0], keys[7]}
+ compareKeySlices(t, expectedMatch, matchedKeys)
+
+ // Should match 1 key, the one that matches any host.
+ matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedMatch = []PublicKey{keys[7]}
+ compareKeySlices(t, expectedMatch, matchedKeys)
+
+ // Should match keys that end in "example.com", and the key that matches anything.
+ matchedKeys, err = FilterByHosts(keys, "foo.example.com", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]}
+ compareKeySlices(t, expectedMatch, matchedKeys)
+
+ // Should match all of the keys except the empty key.
+ matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedMatch = keys[1:]
+ compareKeySlices(t, expectedMatch, matchedKeys)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
new file mode 100644
index 00000000..a2df787d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 00000000..8d84f6dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,582 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+ JWK PublicKey `json:"jwk,omitempty"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+ Header *jsHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected,omitempty"`
+}
+
+type signKey struct {
+ PrivateKey
+ Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+ payload string
+ signatures []*jsSignature
+ indent string
+ formatLength int
+ formatTail []byte
+}
+
+func newJSONSignature() *JSONSignature {
+ return &JSONSignature{
+ signatures: make([]*jsSignature, 0, 1),
+ }
+}
+
+// Payload returns the decoded payload of the signature. This
+// payload should not be signed directly.
+func (js *JSONSignature) Payload() ([]byte, error) {
+ return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+ protected := map[string]interface{}{
+ "formatLength": js.formatLength,
+ "formatTail": joseBase64UrlEncode(js.formatTail),
+ "time": time.Now().UTC().Format(time.RFC3339),
+ }
+ protectedBytes, err := json.Marshal(protected)
+ if err != nil {
+ return "", err
+ }
+
+ return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+ buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+ copy(buf, protectedHeader)
+ buf[len(protectedHeader)] = '.'
+ copy(buf[len(protectedHeader)+1:], js.payload)
+ return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ header := &jsHeader{
+ JWK: key.PublicKey(),
+ Algorithm: algorithm,
+ }
+ sig := &jsSignature{
+ Header: header,
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ }
+
+ js.signatures = append(js.signatures, sig)
+
+ return nil
+}
+
+// SignWithChain adds a signature using the given private key
+// and setting the x509 chain. The public key of the first element
+// in the chain must be the public key corresponding with the sign key.
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+	// TODO: Ensure that chain[0] certifies the public key corresponding to
+	// key, and verify the chain, before signing.
+
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ header := &jsHeader{
+ Chain: make([]string, len(chain)),
+ Algorithm: algorithm,
+ }
+
+ for i, cert := range chain {
+ header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+ }
+
+ sig := &jsSignature{
+ Header: header,
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ }
+
+ js.signatures = append(js.signatures, sig)
+
+ return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains present are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) {
+ keys := make([]PublicKey, len(js.signatures))
+ for i, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ } else if signature.Header.JWK != nil {
+ publicKey = signature.Header.JWK
+ } else {
+ return nil, errors.New("missing public key")
+ }
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keys[i] = publicKey
+ }
+ return keys, nil
+}
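+
+// Illustrative sketch: signing a JSON document and verifying the result
+// (jsonBytes and privateKey are hypothetical values; error handling elided).
+//
+//	js, _ := NewJSONSignature(jsonBytes)
+//	_ = js.Sign(privateKey)
+//	keys, err := js.Verify()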
+
+// VerifyChains verifies all the signatures and the chains associated
+// with each signature and returns the list of verified chains.
+// Signatures without an x509 chain are not checked.
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
+ chains := make([][]*x509.Certificate, 0, len(js.signatures))
+ for _, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ intermediates := x509.NewCertPool()
+ if len(signature.Header.Chain) > 1 {
+ intermediateChain := signature.Header.Chain[1:]
+ for i := range intermediateChain {
+ certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
+ if err != nil {
+ return nil, err
+ }
+ intermediate, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ intermediates.AddCert(intermediate)
+ }
+ }
+
+ verifyOptions := x509.VerifyOptions{
+ Intermediates: intermediates,
+ Roots: ca,
+ }
+
+ verifiedChains, err := cert.Verify(verifyOptions)
+ if err != nil {
+ return nil, err
+ }
+ chains = append(chains, verifiedChains...)
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ }
+ return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("missing signature")
+ }
+ jsonMap := map[string]interface{}{
+ "payload": js.payload,
+ "signatures": js.signatures,
+ }
+
+ return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+ if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+ quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+ if quoteIndex > 0 {
+ indent = string(jsonContent[2 : quoteIndex+1])
+ }
+ }
+ return
+}
+
+type jsParsedHeader struct {
+ JWK json.RawMessage `json:"jwk"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+ Header *jsParsedHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+ type jsParsed struct {
+ Payload string `json:"payload"`
+ Signatures []*jsParsedSignature `json:"signatures"`
+ }
+ parsed := &jsParsed{}
+ err := json.Unmarshal(content, parsed)
+ if err != nil {
+ return nil, err
+ }
+ if len(parsed.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+ payload, err := joseBase64UrlDecode(parsed.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ js, err := NewJSONSignature(payload)
+ if err != nil {
+ return nil, err
+ }
+ js.signatures = make([]*jsSignature, len(parsed.Signatures))
+ for i, signature := range parsed.Signatures {
+ header := &jsHeader{
+ Algorithm: signature.Header.Algorithm,
+ }
+ if signature.Header.Chain != nil {
+ header.Chain = signature.Header.Chain
+ }
+ if signature.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = &jsSignature{
+ Header: header,
+ Signature: signature.Signature,
+ Protected: signature.Protected,
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JSONSignature from a JSON byte
+// array. The JSONSignature will need to be signed before serializing or
+// storing.
+func NewJSONSignature(content []byte) (*JSONSignature, error) {
+ var dataMap map[string]interface{}
+ err := json.Unmarshal(content, &dataMap)
+ if err != nil {
+ return nil, err
+ }
+
+ js := newJSONSignature()
+ js.indent = detectJSONIndent(content)
+
+ js.payload = joseBase64UrlEncode(content)
+
+ // Find trailing } and whitespace, put in protected header
+ closeIndex := bytes.LastIndexFunc(content, notSpace)
+ if content[closeIndex] != '}' {
+ return nil, ErrInvalidJSONContent
+ }
+ lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+ if content[lastRuneIndex] == ',' {
+ return nil, ErrInvalidJSONContent
+ }
+ js.formatLength = lastRuneIndex + 1
+ js.formatTail = content[js.formatLength:]
+
+ return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. The JSONSignature will need to be signed before serializing or
+// storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
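+	// Note: only map[string]interface{} and the bare struct{} type pass this
+	// type switch; other struct types fall through to the error case below.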
+ switch content.(type) {
+ case map[string]interface{}:
+ case struct{}:
+ default:
+ return nil, errors.New("invalid data type")
+ }
+
+ js := newJSONSignature()
+ js.indent = " "
+
+ payload, err := json.MarshalIndent(content, "", js.indent)
+ if err != nil {
+ return nil, err
+ }
+ js.payload = joseBase64UrlEncode(payload)
+
+ // Remove '\n}' from formatted section, put in protected header
+ js.formatLength = len(payload) - 2
+ js.formatTail = payload[js.formatLength:]
+
+ return js, nil
+}
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+ value, ok := m[key]
+ if !ok {
+ return 0, false
+ }
+ switch v := value.(type) {
+ case int:
+ return v, true
+ case float64:
+ return int(v), true
+ default:
+ return 0, false
+ }
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+ value, ok := m[key]
+ if !ok {
+ return "", false
+ }
+ v, ok = value.(string)
+ return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSONSignature. If the signatures are missing the format information,
+// an error is returned. The formatted signature must have been created
+// by PrettySignature.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+ var contentMap map[string]json.RawMessage
+ err := json.Unmarshal(content, &contentMap)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling content: %s", err)
+ }
+ sigMessage, ok := contentMap[signatureKey]
+ if !ok {
+ return nil, ErrMissingSignatureKey
+ }
+
+ var signatureBlocks []jsParsedSignature
+ err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+ }
+
+ js := newJSONSignature()
+ js.signatures = make([]*jsSignature, len(signatureBlocks))
+
+ for i, signatureBlock := range signatureBlocks {
+ protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error: %s", err)
+ }
+ var protectedHeader map[string]interface{}
+ err = json.Unmarshal(protectedBytes, &protectedHeader)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+ }
+
+ formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted length")
+ }
+ encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted tail")
+ }
+ formatTail, err := joseBase64UrlDecode(encodedTail)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+ }
+ if js.formatLength == 0 {
+ js.formatLength = formatLength
+ } else if js.formatLength != formatLength {
+ return nil, errors.New("conflicting format length")
+ }
+ if len(js.formatTail) == 0 {
+ js.formatTail = formatTail
+		} else if !bytes.Equal(js.formatTail, formatTail) {
+ return nil, errors.New("conflicting format tail")
+ }
+
+ header := &jsHeader{
+ Algorithm: signatureBlock.Header.Algorithm,
+ Chain: signatureBlock.Header.Chain,
+ }
+ if signatureBlock.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = &jsSignature{
+ Header: header,
+ Signature: signatureBlock.Signature,
+ Protected: signatureBlock.Protected,
+ }
+ }
+ if js.formatLength > len(content) {
+ return nil, errors.New("invalid format length")
+ }
+ formatted := make([]byte, js.formatLength+len(js.formatTail))
+ copy(formatted, content[:js.formatLength])
+ copy(formatted[js.formatLength:], js.formatTail)
+ js.indent = detectJSONIndent(formatted)
+ js.payload = joseBase64UrlEncode(formatted)
+
+ return js, nil
+}
+
+// PrettySignature formats a json signature into an easy to read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
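+
+// Illustrative round trip ("signatures" is an arbitrary signature key name):
+//
+//	pretty, _ := js.PrettySignature("signatures")
+//	parsed, err := ParsePrettySignature(pretty, "signatures")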
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
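+
+// Illustrative sketch: combining signatures gathered independently over the
+// same payload (otherJS is a hypothetical *JSONSignature).
+//
+//	err := js.Merge(otherJS) // fails if otherJS carries a different payload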
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go
new file mode 100644
index 00000000..b4f26979
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go
@@ -0,0 +1,380 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "io"
+ "testing"
+
+ "github.com/docker/libtrust/testutil"
+)
+
+func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) {
+ testMap := map[string]interface{}{
+ "name": "dmcgowan/mycontainer",
+ "config": map[string]interface{}{
+ "ports": []int{9101, 9102},
+ "run": "/bin/echo \"Hello\"",
+ },
+ "layers": []string{
+ "2893c080-27f5-11e4-8c21-0800200c9a66",
+ "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55",
+ "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4",
+ "0b6da891-7f7f-4abf-9c97-7887549e696c",
+ "1d960389-ae4f-4011-85fd-18d0f96a67ad",
+ },
+ }
+ formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{`
+ formattedSection = fmt.Sprintf(formattedSection, sigKey)
+ if indent != "" {
+ buf := bytes.NewBuffer(nil)
+ json.Indent(buf, []byte(formattedSection), "", indent)
+ return testMap, buf.Bytes()
+ }
+ return testMap, []byte(formattedSection)
+
+}
+
+func TestSignJSON(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating EC key: %s", err)
+ }
+
+ testMap, _ := createTestJSON("buildSignatures", " ")
+ indented, err := json.MarshalIndent(testMap, "", " ")
+ if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+ }
+
+ js, err := NewJSONSignature(indented)
+ if err != nil {
+ t.Fatalf("Error creating JSON signature: %s", err)
+ }
+ err = js.Sign(key)
+ if err != nil {
+ t.Fatalf("Error signing content: %s", err)
+ }
+
+ keys, err := js.Verify()
+ if err != nil {
+ t.Fatalf("Error verifying signature: %s", err)
+ }
+ if len(keys) != 1 {
+		t.Fatalf("Wrong number of keys returned")
+ }
+ if keys[0].KeyID() != key.KeyID() {
+ t.Fatalf("Unexpected public key returned")
+ }
+
+}
+
+func TestSignMap(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating EC key: %s", err)
+ }
+
+ testMap, _ := createTestJSON("buildSignatures", " ")
+ js, err := NewJSONSignatureFromMap(testMap)
+ if err != nil {
+ t.Fatalf("Error creating JSON signature: %s", err)
+ }
+ err = js.Sign(key)
+ if err != nil {
+ t.Fatalf("Error signing JSON signature: %s", err)
+ }
+
+ keys, err := js.Verify()
+ if err != nil {
+ t.Fatalf("Error verifying signature: %s", err)
+ }
+ if len(keys) != 1 {
+		t.Fatalf("Wrong number of keys returned")
+ }
+ if keys[0].KeyID() != key.KeyID() {
+ t.Fatalf("Unexpected public key returned")
+ }
+}
+
+func TestFormattedJson(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating EC key: %s", err)
+ }
+
+ testMap, firstSection := createTestJSON("buildSignatures", " ")
+ indented, err := json.MarshalIndent(testMap, "", " ")
+ if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+ }
+
+ js, err := NewJSONSignature(indented)
+ if err != nil {
+ t.Fatalf("Error creating JSON signature: %s", err)
+ }
+ err = js.Sign(key)
+ if err != nil {
+ t.Fatalf("Error signing content: %s", err)
+ }
+
+ b, err := js.PrettySignature("buildSignatures")
+ if err != nil {
+ t.Fatalf("Error signing map: %s", err)
+ }
+
+	if !bytes.Equal(b[:len(firstSection)], firstSection) {
+ t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
+ }
+
+ parsed, err := ParsePrettySignature(b, "buildSignatures")
+ if err != nil {
+ t.Fatalf("Error parsing formatted signature: %s", err)
+ }
+
+ keys, err := parsed.Verify()
+ if err != nil {
+ t.Fatalf("Error verifying signature: %s", err)
+ }
+ if len(keys) != 1 {
+		t.Fatalf("Wrong number of keys returned")
+ }
+ if keys[0].KeyID() != key.KeyID() {
+ t.Fatalf("Unexpected public key returned")
+ }
+
+ var unmarshalled map[string]interface{}
+ err = json.Unmarshal(b, &unmarshalled)
+ if err != nil {
+		t.Fatalf("Could not unmarshal after parse: %s", err)
+ }
+
+}
+
+func TestFormattedFlatJson(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating EC key: %s", err)
+ }
+
+ testMap, firstSection := createTestJSON("buildSignatures", "")
+ unindented, err := json.Marshal(testMap)
+ if err != nil {
+		t.Fatalf("Marshal error: %s", err)
+ }
+
+ js, err := NewJSONSignature(unindented)
+ if err != nil {
+ t.Fatalf("Error creating JSON signature: %s", err)
+ }
+ err = js.Sign(key)
+ if err != nil {
+ t.Fatalf("Error signing JSON signature: %s", err)
+ }
+
+ b, err := js.PrettySignature("buildSignatures")
+ if err != nil {
+ t.Fatalf("Error signing map: %s", err)
+ }
+
+	if !bytes.Equal(b[:len(firstSection)], firstSection) {
+ t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)])
+ }
+
+ parsed, err := ParsePrettySignature(b, "buildSignatures")
+ if err != nil {
+ t.Fatalf("Error parsing formatted signature: %s", err)
+ }
+
+ keys, err := parsed.Verify()
+ if err != nil {
+ t.Fatalf("Error verifying signature: %s", err)
+ }
+ if len(keys) != 1 {
+		t.Fatalf("Wrong number of keys returned")
+ }
+ if keys[0].KeyID() != key.KeyID() {
+ t.Fatalf("Unexpected public key returned")
+ }
+}
+
+func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) {
+ parent := ca
+ parentKey := key
+ chain := make([]*x509.Certificate, 6)
+ for i := 5; i > 0; i-- {
+ intermediatekey, err := GenerateECP256PrivateKey()
+ if err != nil {
+			t.Fatalf("Error generating key: %s", err)
+ }
+ chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+ if err != nil {
+			t.Fatalf("Error generating intermediate certificate: %s", err)
+ }
+ parent = chain[i]
+ parentKey = intermediatekey
+ }
+ trustKey, err := GenerateECP256PrivateKey()
+ if err != nil {
+		t.Fatalf("Error generating key: %s", err)
+ }
+ chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+ if err != nil {
+		t.Fatalf("Error generating trust cert: %s", err)
+ }
+
+ return trustKey, chain
+}
+
+func TestChainVerify(t *testing.T) {
+ caKey, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating key: %s", err)
+ }
+ ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+ if err != nil {
+ t.Fatalf("Error generating ca: %s", err)
+ }
+ trustKey, chain := generateTrustChain(t, caKey, ca)
+
+ testMap, _ := createTestJSON("verifySignatures", " ")
+ js, err := NewJSONSignatureFromMap(testMap)
+ if err != nil {
+ t.Fatalf("Error creating JSONSignature from map: %s", err)
+ }
+
+ err = js.SignWithChain(trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error signing with chain: %s", err)
+ }
+
+ pool := x509.NewCertPool()
+ pool.AddCert(ca)
+ chains, err := js.VerifyChains(pool)
+ if err != nil {
+ t.Fatalf("Error verifying content: %s", err)
+ }
+ if len(chains) != 1 {
+ t.Fatalf("Unexpected chains length: %d", len(chains))
+ }
+ if len(chains[0]) != 7 {
+ t.Fatalf("Unexpected chain length: %d", len(chains[0]))
+ }
+}
+
+func TestInvalidChain(t *testing.T) {
+ caKey, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating key: %s", err)
+ }
+ ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+ if err != nil {
+ t.Fatalf("Error generating ca: %s", err)
+ }
+ trustKey, chain := generateTrustChain(t, caKey, ca)
+
+ testMap, _ := createTestJSON("verifySignatures", " ")
+ js, err := NewJSONSignatureFromMap(testMap)
+ if err != nil {
+ t.Fatalf("Error creating JSONSignature from map: %s", err)
+ }
+
+ err = js.SignWithChain(trustKey, chain[:5])
+ if err != nil {
+ t.Fatalf("Error signing with chain: %s", err)
+ }
+
+ pool := x509.NewCertPool()
+ pool.AddCert(ca)
+ chains, err := js.VerifyChains(pool)
+ if err == nil {
+ t.Fatalf("Expected error verifying with bad chain")
+ }
+ if len(chains) != 0 {
+ t.Fatalf("Unexpected chains returned from invalid verify")
+ }
+}
+
+func TestMergeSignatures(t *testing.T) {
+ pk1, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("unexpected error generating private key 1: %v", err)
+ }
+
+ pk2, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("unexpected error generating private key 2: %v", err)
+ }
+
+ payload := make([]byte, 1<<10)
+ if _, err = io.ReadFull(rand.Reader, payload); err != nil {
+ t.Fatalf("error generating payload: %v", err)
+ }
+
+ payload, _ = json.Marshal(map[string]interface{}{"data": payload})
+
+ sig1, err := NewJSONSignature(payload)
+ if err != nil {
+ t.Fatalf("unexpected error creating signature 1: %v", err)
+ }
+
+ if err := sig1.Sign(pk1); err != nil {
+ t.Fatalf("unexpected error signing with pk1: %v", err)
+ }
+
+ sig2, err := NewJSONSignature(payload)
+ if err != nil {
+ t.Fatalf("unexpected error creating signature 2: %v", err)
+ }
+
+ if err := sig2.Sign(pk2); err != nil {
+ t.Fatalf("unexpected error signing with pk2: %v", err)
+ }
+
+ // Now, we actually merge into sig1
+ if err := sig1.Merge(sig2); err != nil {
+ t.Fatalf("unexpected error merging: %v", err)
+ }
+
+ // Verify the new signature package
+ pubkeys, err := sig1.Verify()
+ if err != nil {
+ t.Fatalf("unexpected error during verify: %v", err)
+ }
+
+ // Make sure the pubkeys match the two private keys from before
+ privkeys := map[string]PrivateKey{
+ pk1.KeyID(): pk1,
+ pk2.KeyID(): pk2,
+ }
+
+ found := map[string]struct{}{}
+
+ for _, pubkey := range pubkeys {
+ if _, ok := privkeys[pubkey.KeyID()]; !ok {
+ t.Fatalf("unexpected public key found during verification: %v", pubkey)
+ }
+
+ found[pubkey.KeyID()] = struct{}{}
+ }
+
+ // Make sure we've found all the private keys from verification
+	for keyid := range privkeys {
+ if _, ok := found[keyid]; !ok {
+ t.Fatalf("public key %v not found during verification", keyid)
+ }
+ }
+
+ // Create another signature, with a different payload, and ensure we get an error.
+ sig3, err := NewJSONSignature([]byte("{}"))
+ if err != nil {
+ t.Fatalf("unexpected error making signature for sig3: %v", err)
+ }
+
+ if err := sig1.Merge(sig3); err == nil {
+ t.Fatalf("error expected during invalid merge with different payload")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error
+ // CryptoPublicKey returns the internal object which can be used as a
+ // crypto.PublicKey for use with other standard library operations. The type
+ // is either *rsa.PublicKey or *ecdsa.PublicKey
+ CryptoPublicKey() crypto.PublicKey
+ // These public keys can be serialized to the standard JSON encoding for
+ // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+ // Algorithms.
+ MarshalJSON() ([]byte, error)
+ // These keys can also be serialized to the standard PEM encoding.
+ PEMBlock() (*pem.Block, error)
+ // The string representation of a key is its key type and ID.
+ String() string
+ AddExtendedField(string, interface{})
+ GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+ // A PrivateKey contains all fields and methods of a PublicKey of the
+ // same type. The MarshalJSON method also outputs the private key as a
+ // JSON Web Key, and the PEMBlock method outputs the private key as a
+ // PEM block.
+ PublicKey
+ // PublicKey returns the PublicKey associated with this PrivateKey.
+ PublicKey() PublicKey
+ // Sign signs the data read from the io.Reader using a signature algorithm
+ // supported by the private key. If the specified hashing algorithm is
+ // supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+ // used. Returns the signature and identifier of the algorithm used.
+ Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+ // CryptoPrivateKey returns the internal object which can be used as a
+ // crypto.PrivateKey for use with other standard library operations. The
+ // type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+ CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+ switch cryptoPublicKey := cryptoPublicKey.(type) {
+ case *ecdsa.PublicKey:
+ return fromECPublicKey(cryptoPublicKey)
+ case *rsa.PublicKey:
+ return fromRSAPublicKey(cryptoPublicKey), nil
+ default:
+ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+ }
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return fromECPrivateKey(cryptoPrivateKey)
+ case *rsa.PrivateKey:
+ return fromRSAPrivateKey(cryptoPrivateKey), nil
+ default:
+ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+ }
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+ pubKeys := []PublicKey{}
+
+ for {
+ var pemBlock *pem.Block
+ pemBlock, data = pem.Decode(data)
+ if pemBlock == nil {
+ break
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ pubKey, err := pubKeyFromPEMBlock(pemBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ }
+
+ var key PrivateKey
+
+ switch {
+ case pemBlock.Type == "RSA PRIVATE KEY":
+ rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+ }
+ key = fromRSAPrivateKey(rsaPrivateKey)
+ case pemBlock.Type == "EC PRIVATE KEY":
+ ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+ }
+ key, err = fromECPrivateKey(ecPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+ }
+
+ addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+ return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
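+//
+// For illustration, a minimal RSA public key JWK accepted here has the
+// following shape (the modulus value is a placeholder; "AQAB" is the
+// base64url encoding of the common exponent 65537):
+//
+//	{"kty": "RSA", "n": "<base64url modulus>", "e": "AQAB"}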
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Public Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Public Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC public key.
+ return ecPublicKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA public key.
+ return rsaPublicKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Public Key type not supported: %q\n", kty,
+ )
+ }
+}
+
+// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
+// and returns a slice of Public Key objects.
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
+ rawKeys, err := loadJSONKeySetRaw(data)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys := make([]PublicKey, 0, len(rawKeys))
+
+ for _, rawKey := range rawKeys {
+ pubKey, err := UnmarshalPublicKeyJWK(rawKey)
+ if err != nil {
+ return nil, err
+ }
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
+// Private Key to be used with libtrust.
+func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Private Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Private Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC private key.
+ return ecPrivateKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA private key.
+ return rsaPrivateKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Private Key type not supported: %q\n", kty,
+ )
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go
new file mode 100644
index 00000000..c526de54
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
+package libtrust
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+var (
+ // ErrKeyFileDoesNotExist indicates that the private key file does not exist.
+ ErrKeyFileDoesNotExist = errors.New("key file does not exist")
+)
+
+func readKeyFileBytes(filename string) ([]byte, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = ErrKeyFileDoesNotExist
+ } else {
+ err = fmt.Errorf("unable to read key file %s: %s", filename, err)
+ }
+
+ return nil, err
+ }
+
+ return data, nil
+}
+
+/*
+ Loading and Saving of Public and Private Keys in either PEM or JWK format.
+*/
+
+// LoadKeyFile opens the given filename and attempts to read a Private Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadKeyFile(filename string) (PrivateKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PrivateKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPrivateKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPrivateKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadPublicKeyFile(filename string) (PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PublicKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPublicKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPublicKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// SaveKey saves the given key to a file using the provided filename.
+// This process will overwrite any existing file at the provided location.
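+//
+// A round-trip sketch (hypothetical file names; the extension selects the
+// encoding, and error handling is elided):
+//
+//	_ = SaveKey("id_key.jwk", key) // saved as a JSON Web Key
+//	_ = SaveKey("id_key.pem", key) // saved as a PEM block
+//	loaded, _ := LoadKeyFile("id_key.jwk")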
+func SaveKey(filename string, key PrivateKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
+ if err != nil {
+ return fmt.Errorf("unable to write private key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// SavePublicKey saves the given public key to the file.
+func SavePublicKey(filename string, key PublicKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode public key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode public key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write public key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// Public Key Set files
+
+type jwkSet struct {
+ Keys []json.RawMessage `json:"keys"`
+}
+
+// LoadKeySetFile loads a key set from a file in either PEM bundle or JWK
+// set format (if .json or .jwk file extension).
+func LoadKeySetFile(filename string) ([]PublicKey, error) {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return loadJSONKeySetFile(filename)
+ }
+
+ // Must be a PEM format file
+ return loadPEMKeySetFile(filename)
+}
+
+func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
+ if len(data) == 0 {
+ // This is okay, just return an empty slice.
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
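+//
+// Sketch of appending a key and reading the set back (hypothetical path;
+// error handling elided):
+//
+//	_ = AddKeySetFile("trusted_clients.json", key.PublicKey())
+//	keys, _ := LoadKeySetFile("trusted_clients.json")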
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encoded trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
new file mode 100644
index 00000000..57e691f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go
@@ -0,0 +1,220 @@
+package libtrust
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func makeTempFile(t *testing.T, prefix string) (filename string) {
+ file, err := ioutil.TempFile("", prefix)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ filename = file.Name()
+ file.Close()
+
+ return
+}
+
+func TestKeyFiles(t *testing.T) {
+ key, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testKeyFiles(t, key)
+
+ key, err = GenerateRSA2048PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testKeyFiles(t, key)
+}
+
+func testKeyFiles(t *testing.T, key PrivateKey) {
+ var err error
+
+ privateKeyFilename := makeTempFile(t, "private_key")
+ privateKeyFilenamePEM := privateKeyFilename + ".pem"
+ privateKeyFilenameJWK := privateKeyFilename + ".jwk"
+
+ publicKeyFilename := makeTempFile(t, "public_key")
+ publicKeyFilenamePEM := publicKeyFilename + ".pem"
+ publicKeyFilenameJWK := publicKeyFilename + ".jwk"
+
+ if err = SaveKey(privateKeyFilenamePEM, key); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = SaveKey(privateKeyFilenameJWK, key); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil {
+ t.Fatal(err)
+ }
+
+ loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if key.KeyID() != loadedPEMKey.KeyID() {
+ t.Fatal(errors.New("key IDs do not match"))
+ }
+
+ if key.KeyID() != loadedJWKKey.KeyID() {
+ t.Fatal(errors.New("key IDs do not match"))
+ }
+
+ if key.KeyID() != loadedPEMPublicKey.KeyID() {
+ t.Fatal(errors.New("key IDs do not match"))
+ }
+
+ if key.KeyID() != loadedJWKPublicKey.KeyID() {
+ t.Fatal(errors.New("key IDs do not match"))
+ }
+
+ os.Remove(privateKeyFilename)
+ os.Remove(privateKeyFilenamePEM)
+ os.Remove(privateKeyFilenameJWK)
+ os.Remove(publicKeyFilename)
+ os.Remove(publicKeyFilenamePEM)
+ os.Remove(publicKeyFilenameJWK)
+}
+
+func TestTrustedHostKeysFile(t *testing.T) {
+ trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys")
+ trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem"
+ trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json"
+
+ testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM)
+ testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK)
+
+ os.Remove(trustedHostKeysFilename)
+ os.Remove(trustedHostKeysFilenamePEM)
+ os.Remove(trustedHostKeysFilenameJWK)
+}
+
+func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) {
+ hostAddress1 := "docker.example.com:2376"
+ hostKey1, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hostKey1.AddExtendedField("hosts", []string{hostAddress1})
+ err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, hostKey := range trustedHostKeysMapping {
+ t.Logf("Trusted Host Key %d: %s\n\n", i, hostKey)
+ }
+
+ hostAddress2 := "192.168.59.103:2376"
+ hostKey2, err := GenerateRSA2048PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hostKey2.AddExtendedField("hosts", hostAddress2)
+ err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, hostKey := range trustedHostKeysMapping {
+ t.Logf("Trusted Host Key %d: %s\n\n", i, hostKey)
+ }
+
+}
+
+func TestTrustedClientKeysFile(t *testing.T) {
+ trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys")
+ trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem"
+ trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json"
+
+ testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM)
+ testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK)
+
+ os.Remove(trustedClientKeysFilename)
+ os.Remove(trustedClientKeysFilenamePEM)
+ os.Remove(trustedClientKeysFilenameJWK)
+}
+
+func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) {
+ clientKey1, err := GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, clientKey := range trustedClientKeys {
+ t.Logf("Client Key: %s\n", clientKey)
+ }
+
+ clientKey2, err := GenerateRSA2048PrivateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, clientKey := range trustedClientKeys {
+ t.Logf("Client Key: %s\n", clientKey)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 00000000..9a98ae35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+ key PrivateKey
+ clientFile string
+ clientDir string
+
+ clientLock sync.RWMutex
+ clients []PublicKey
+
+ configLock sync.Mutex
+ configs []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
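+//
+// A usage sketch (file and directory names are illustrative; error
+// handling elided):
+//
+//	mgr, _ := NewClientKeyManager(trustKey, "authorized_keys.json", "authorized_keys.d")
+//	tlsConfig := newTLSConfig()
+//	_ = mgr.RegisterTLSConfig(tlsConfig)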
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+ m := &ClientKeyManager{
+ key: trustKey,
+ clientFile: clientFile,
+ clientDir: clientDir,
+ }
+ if err := m.loadKeys(); err != nil {
+ return nil, err
+ }
+ // TODO Start watching file and directory
+
+ return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+ // Load authorized keys file
+ var clients []PublicKey
+ if c.clientFile != "" {
+ clients, err = LoadKeySetFile(c.clientFile)
+ if err != nil {
+ return fmt.Errorf("unable to load authorized keys: %s", err)
+ }
+ }
+
+ // Add clients from authorized keys directory
+ files, err := ioutil.ReadDir(c.clientDir)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("unable to open authorized keys directory: %s", err)
+ }
+ for _, f := range files {
+ if !f.IsDir() {
+ publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+ if err != nil {
+ return fmt.Errorf("unable to load authorized key file: %s", err)
+ }
+ clients = append(clients, publicKey)
+ }
+ }
+
+ c.clientLock.Lock()
+ c.clients = clients
+ c.clientLock.Unlock()
+
+ return nil
+}
+
+// RegisterTLSConfig registers a TLS configuration with the manager so
+// that any changes to the keys may be reflected in the TLS client CA
+// pool.
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+ c.clientLock.RLock()
+ certPool, err := GenerateCACertPool(c.key, c.clients)
+ // Unlock before any return so an error path does not leave the read
+ // lock held.
+ c.clientLock.RUnlock()
+ if err != nil {
+ return fmt.Errorf("CA pool generation error: %s", err)
+ }
+
+ tlsConfig.ClientCAs = certPool
+
+ c.configLock.Lock()
+ c.configs = append(c.configs, tlsConfig)
+ c.configLock.Unlock()
+
+ return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+ return nil, err
+ }
+
+ // Generate cert
+ ips, domains, err := parseAddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ // add domain that it expects clients to use
+ domains = append(domains, domain)
+ x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+
+ // Verify client certificates against a CA?
+ if caPath != "" {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+
+ return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+ return &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ // Avoid fallback on insecure SSL protocols
+ MinVersion: tls.VersionTLS10,
+ }
+}
+
+// parseAddr parses an address into an array of IPs and domains
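+// For example, "127.0.0.1:2376" yields a single IP and no domains, while
+// "docker.example.com:2376" yields a single domain and no IPs.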
+func parseAddr(addr string) ([]net.IP, []string, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ var domains []string
+ var ips []net.IP
+ ip := net.ParseIP(host)
+ if ip != nil {
+ ips = []net.IP{ip}
+ } else {
+ domains = []string{host}
+ }
+ return ips, domains, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go
new file mode 100644
index 00000000..f6c59cc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go
@@ -0,0 +1,80 @@
+package libtrust
+
+import (
+ "testing"
+)
+
+type generateFunc func() (PrivateKey, error)
+
+func runGenerateBench(b *testing.B, f generateFunc, name string) {
+ for i := 0; i < b.N; i++ {
+ _, err := f()
+ if err != nil {
+ b.Fatalf("Error generating %s: %s", name, err)
+ }
+ }
+}
+
+func runFingerprintBench(b *testing.B, f generateFunc, name string) {
+ b.StopTimer()
+ // Don't count this relatively slow generation call.
+ key, err := f()
+ if err != nil {
+ b.Fatalf("Error generating %s: %s", name, err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if key.KeyID() == "" {
+ b.Fatalf("Error generating key ID for %s", name)
+ }
+ }
+}
+
+func BenchmarkECP256Generate(b *testing.B) {
+ runGenerateBench(b, GenerateECP256PrivateKey, "P256")
+}
+
+func BenchmarkECP384Generate(b *testing.B) {
+ runGenerateBench(b, GenerateECP384PrivateKey, "P384")
+}
+
+func BenchmarkECP521Generate(b *testing.B) {
+ runGenerateBench(b, GenerateECP521PrivateKey, "P521")
+}
+
+func BenchmarkRSA2048Generate(b *testing.B) {
+ runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048")
+}
+
+func BenchmarkRSA3072Generate(b *testing.B) {
+ runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072")
+}
+
+func BenchmarkRSA4096Generate(b *testing.B) {
+ runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096")
+}
+
+func BenchmarkECP256Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateECP256PrivateKey, "P256")
+}
+
+func BenchmarkECP384Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateECP384PrivateKey, "P384")
+}
+
+func BenchmarkECP521Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateECP521PrivateKey, "P521")
+}
+
+func BenchmarkRSA2048Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048")
+}
+
+func BenchmarkRSA3072Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072")
+}
+
+func BenchmarkRSA4096Fingerprint(b *testing.B) {
+ runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096")
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 00000000..dac4cacf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * RSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+ *rsa.PublicKey
+ extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+ return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // Verify the signature of the given data, returning a non-nil error if it is invalid.
+ sigAlg, err := rsaSignatureAlgorithmByName(alg)
+ if err != nil {
+ return fmt.Errorf("unable to verify Signature: %s", err)
+ }
+
+ hasher := sigAlg.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+ if err != nil {
+ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. For
+// this key, the type is *rsa.PublicKey.
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+ jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract 'n', 'e', and 'kid' and check for
+ // consistency.
+
+ // Get the modulus parameter N.
+ nB64Url, err := stringFromMap(jwk, "n")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ n, err := parseRSAModulusParam(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+ }
+
+ // Get the public exponent E.
+ eB64Url, err := stringFromMap(jwk, "e")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ e, err := parseRSAPublicExponentParam(eB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+ }
+
+ key := &rsaPublicKey{
+ PublicKey: &rsa.PublicKey{N: n, E: e},
+ }
+
+ // Key ID is optional, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+ }
+ }
+
+ if _, ok := jwk["d"]; ok {
+ return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * RSA PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+ rsaPublicKey
+ *rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+ return &rsaPrivateKey{
+ *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+ cryptoPrivateKey,
+ }
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+ return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+ return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+ hasher := sigAlg.HashID().New()
+
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ alg = sigAlg.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. For
+// this key, the type is *rsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+ k.Precompute() // Make sure the precomputed values are stored.
+ jwk := k.rsaPublicKey.toMap()
+
+ jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+ jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+ jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+ jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+ jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+ jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+ otherPrimes := k.Primes[2:]
+
+ if len(otherPrimes) > 0 {
+ otherPrimesInfo := make([]interface{}, len(otherPrimes))
+ for i, r := range otherPrimes {
+ otherPrimeInfo := make(map[string]string, 3)
+ otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+ crtVal := k.Precomputed.CRTValues[i]
+ otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+ otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+ otherPrimesInfo[i] = otherPrimeInfo
+ }
+ jwk["oth"] = otherPrimesInfo
+ }
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+ // only the private key exponent 'd' is REQUIRED; the others are just for
+ // signature/decryption optimizations and SHOULD be included when the JWK
+ // is produced. We could accept a JWK which includes only 'd', but we
+ // choose to require the precomputed fields as well. Only the 'oth' field
+ // is optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+ }
+ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ var oth interface{}
+ if _, ok := jwk["oth"]; ok {
+ oth = jwk["oth"]
+ delete(jwk, "oth")
+ }
+
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract the public key information, then extract the private
+ // key values.
+ publicKey, err := rsaPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: privateExponent,
+ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
+ Precomputed: rsa.PrecomputedValues{
+ Dp: firstFactorCRT,
+ Dq: secondFactorCRT,
+ Qinv: crtCoeff,
+ },
+ }
+
+ if oth != nil {
+ // Should be an array of more JSON objects.
+ otherPrimesInfo, ok := oth.([]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+ }
+ numOtherPrimeFactors := len(otherPrimesInfo)
+ if numOtherPrimeFactors == 0 {
+ return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
+ }
+ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+ for i, val := range otherPrimesInfo {
+ otherPrimeinfo, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+ }
+
+ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ // Take the address of the slice element so the assignments
+ // below are not lost on a struct copy.
+ crtValue := &crtValues[i]
+ crtValue.Exp = otherFactorCRT
+ crtValue.Coeff = otherCrtCoeff
+ crtValue.R = productOfPrimes
+ otherPrimeFactors[i] = otherPrimeFactor
+ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+ }
+
+ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+ privateKey.Precomputed.CRTValues = crtValues
+ }
+
+ key := &rsaPrivateKey{
+ rsaPublicKey: *publicKey,
+ PrivateKey: privateKey,
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+ k = new(rsaPrivateKey)
+ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+
+ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(2048)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(3072)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+ }
+
+ return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+ k, err := generateRSAPrivateKey(4096)
+ if err != nil {
+ return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+ }
+
+ return k, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go
new file mode 100644
index 00000000..5ec7707a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go
@@ -0,0 +1,157 @@
+package libtrust
+
+import (
+ "bytes"
+ "encoding/json"
+ "log"
+ "testing"
+)
+
+var rsaKeys []PrivateKey
+
+func init() {
+ var err error
+ rsaKeys, err = generateRSATestKeys()
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func generateRSATestKeys() (keys []PrivateKey, err error) {
+ log.Println("Generating RSA 2048-bit Test Key")
+ rsa2048Key, err := GenerateRSA2048PrivateKey()
+ if err != nil {
+ return
+ }
+
+ log.Println("Generating RSA 3072-bit Test Key")
+ rsa3072Key, err := GenerateRSA3072PrivateKey()
+ if err != nil {
+ return
+ }
+
+ log.Println("Generating RSA 4096-bit Test Key")
+ rsa4096Key, err := GenerateRSA4096PrivateKey()
+ if err != nil {
+ return
+ }
+
+ log.Println("Done generating RSA Test Keys!")
+ keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key}
+
+ return
+}
+
+func TestRSAKeys(t *testing.T) {
+ for _, rsaKey := range rsaKeys {
+ if rsaKey.KeyType() != "RSA" {
+ t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType())
+ }
+ }
+}
+
+func TestRSASignVerify(t *testing.T) {
+ message := "Hello, World!"
+ data := bytes.NewReader([]byte(message))
+
+ sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512}
+
+ for i, rsaKey := range rsaKeys {
+ sigAlg := sigAlgs[i]
+
+ t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID())
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Sign
+ sig, alg, err := rsaKey.Sign(data, sigAlg.HashID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data.Seek(0, 0) // Reset the byte reader
+
+ // Verify
+ err = rsaKey.Verify(data, alg, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalRSAKeys(t *testing.T) {
+ data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test."))
+ sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512}
+
+ for i, rsaKey := range rsaKeys {
+ sigAlg := sigAlgs[i]
+ privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Logf("JWK Private Key: %s", string(privateJWKJSON))
+ t.Logf("JWK Public Key: %s", string(publicJWKJSON))
+
+ privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure we can sign/verify a message with the unmarshalled keys.
+ data.Seek(0, 0) // Reset the byte reader
+ signature, alg, err := privKey2.Sign(data, sigAlg.HashID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data.Seek(0, 0) // Reset the byte reader
+ err = pubKey2.Verify(data, alg, signature)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // It's a good idea to validate the Private Key to make sure our
+ // (un)marshal process didn't corrupt the extra parameters.
+ k := privKey2.(*rsaPrivateKey)
+ err = k.PrivateKey.Validate()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestFromCryptoRSAKeys(t *testing.T) {
+ for _, rsaKey := range rsaKeys {
+ cryptoPrivateKey := rsaKey.CryptoPrivateKey()
+ cryptoPublicKey := rsaKey.CryptoPublicKey()
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if pubKey.KeyID() != rsaKey.KeyID() {
+ t.Fatal("public key key ID mismatch")
+ }
+
+ privKey, err := FromCryptoPrivateKey(cryptoPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if privKey.KeyID() != rsaKey.KeyID() {
+ t.Fatal("public key key ID mismatch")
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
new file mode 100644
index 00000000..89debf6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go
@@ -0,0 +1,94 @@
+package testutil
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "math/big"
+ "time"
+)
+
+// GenerateTrustCA generates a new certificate authority for testing.
+func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) {
+ cert := &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: "CA Root",
+ },
+ NotBefore: time.Now().Add(-time.Second),
+ NotAfter: time.Now().Add(time.Hour),
+ IsCA: true,
+ KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+ BasicConstraintsValid: true,
+ }
+
+ certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
+ if err != nil {
+ return nil, err
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, err
+ }
+
+ return cert, nil
+}
+
+// GenerateIntermediate generates an intermediate certificate for testing using
+// the parent certificate (likely a CA) and the provided keys.
+func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+ cert := &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: "Intermediate",
+ },
+ NotBefore: time.Now().Add(-time.Second),
+ NotAfter: time.Now().Add(time.Hour),
+ IsCA: true,
+ KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+ BasicConstraintsValid: true,
+ }
+
+ certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, err
+ }
+
+ return cert, nil
+}
+
+// GenerateTrustCert generates a new trust certificate for testing. Unlike the
+// intermediate certificates, this certificate should be used for signature
+// only, not creating certificates.
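+//
+// A sketch of building a full test chain with this package (error handling
+// elided; the key pairs are any types accepted by x509.CreateCertificate):
+//
+//	ca, _ := GenerateTrustCA(caPub, caPriv)
+//	intermediate, _ := GenerateIntermediate(interPub, caPriv, ca)
+//	leaf, _ := GenerateTrustCert(leafPub, interPriv, intermediate)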
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) {
+ cert := &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: "Trust Cert",
+ },
+ NotBefore: time.Now().Add(-time.Second),
+ NotAfter: time.Now().Add(time.Hour),
+ IsCA: true,
+ KeyUsage: x509.KeyUsageDigitalSignature,
+ BasicConstraintsValid: true,
+ }
+
+ certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, err
+ }
+
+ return cert, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
new file mode 100644
index 00000000..24124db2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md
@@ -0,0 +1,50 @@
+## Libtrust TLS Config Demo
+
+This program generates key pairs and trust files for a TLS client and server.
+
+To generate the keys, run:
+
+```
+$ go run genkeys.go
+```
+
+The generated files are:
+
+```
+$ ls -l client_data/ server_data/
+client_data/:
+total 24
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json
+
+server_data/:
+total 24
+-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json
+-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json
+-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json
+```
+
+The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping from an internet address, `<HOSTNAME_OR_IP>:<PORT>`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects, each with a comment field which can be used to describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client.
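+
+For reference, a `trusted_hosts.json` file produced by `AddKeySetFile` has roughly the following shape (the key ID, coordinates, and host address below are placeholders):
+
+```
+{
+  "keys": [
+    {
+      "crv": "P-256",
+      "hosts": ["localhost:8888"],
+      "kid": "ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP",
+      "kty": "EC",
+      "x": "<base64url x-coordinate>",
+      "y": "<base64url y-coordinate>"
+    }
+  ]
+}
+```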
+
+To start the server, run:
+
+```
+$ go run server.go
+```
+
+This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message.
+
+To make a request using the client, run:
+
+```
+$ go run client.go
+```
+
+This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server.
+
+The file `gencert.go` can be used to generate PEM encoded versions of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running).
+
+```
+curl --cert cert.pem --key key.pem -k https://localhost:8888
+```
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go
new file mode 100644
index 00000000..0a699a0e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+
+ "github.com/docker/libtrust"
+)
+
+var (
+ serverAddress = "localhost:8888"
+ privateKeyFilename = "client_data/private_key.pem"
+ trustedHostsFilename = "client_data/trusted_hosts.pem"
+)
+
+func main() {
+ // Load Client Key.
+ clientKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Generate Client Certificate.
+ selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Load trusted host keys.
+ hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Ensure the host we want to connect to is trusted!
+ host, _, err := net.SplitHostPort(serverAddress)
+ if err != nil {
+ log.Fatal(err)
+ }
+ serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false)
+ if err != nil {
+ log.Fatalf("%q is not a known and trusted host", host)
+ }
+
+ // Generate a CA pool with the trusted host's key.
+ caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create HTTP Client.
+ client := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ Certificates: []tls.Certificate{
+ {
+ Certificate: [][]byte{selfSignedClientCert.Raw},
+ PrivateKey: clientKey.CryptoPrivateKey(),
+ Leaf: selfSignedClientCert,
+ },
+ },
+ RootCAs: caPool,
+ },
+ },
+ }
+
+ var makeRequest = func(url string) {
+ resp, err := client.Get(url)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ log.Println(resp.Status)
+ log.Println(string(body))
+ }
+
+ // Make the request to the trusted server!
+ makeRequest(fmt.Sprintf("https://%s", serverAddress))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go
new file mode 100644
index 00000000..c65f3b6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go
@@ -0,0 +1,62 @@
+package main
+
+import (
+ "encoding/pem"
+ "fmt"
+ "log"
+ "net"
+
+ "github.com/docker/libtrust"
+)
+
+var (
+ serverAddress = "localhost:8888"
+ clientPrivateKeyFilename = "client_data/private_key.pem"
+ trustedHostsFilename = "client_data/trusted_hosts.pem"
+)
+
+func main() {
+ key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ keyPEMBlock, err := key.PEMBlock()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ encodedPrivKey := pem.EncodeToMemory(keyPEMBlock)
+ fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey))
+
+ cert, err := libtrust.GenerateSelfSignedClientCert(key)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
+ fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert))
+
+ trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ hostname, _, err := net.SplitHostPort(serverAddress)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0])
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})
+ fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go
new file mode 100644
index 00000000..9dc8842a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "log"
+
+ "github.com/docker/libtrust"
+)
+
+func main() {
+ // Generate client key.
+ clientKey, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Add a comment for the client key.
+ clientKey.AddExtendedField("comment", "TLS Demo Client")
+
+ // Save the client key, public and private versions.
+ err = libtrust.SaveKey("client_data/private_key.pem", clientKey)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Generate server key.
+ serverKey, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Set the list of addresses to use for the server.
+ serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"})
+
+ // Save the server key, public and private versions.
+ err = libtrust.SaveKey("server_data/private_key.pem", serverKey)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Generate Authorized Keys file for server.
+ err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Generate Known Host Keys file for client.
+ err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey())
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
new file mode 100644
index 00000000..d3cb2ea9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go
@@ -0,0 +1,80 @@
+package main
+
+import (
+ "crypto/tls"
+ "fmt"
+ "html"
+ "log"
+ "net"
+ "net/http"
+
+ "github.com/docker/libtrust"
+)
+
+var (
+ serverAddress = "localhost:8888"
+ privateKeyFilename = "server_data/private_key.pem"
+ authorizedClientsFilename = "server_data/trusted_clients.pem"
+)
+
+func requestHandler(w http.ResponseWriter, r *http.Request) {
+ clientCert := r.TLS.PeerCertificates[0]
+ keyID := clientCert.Subject.CommonName
+ log.Printf("Request from keyID: %s\n", keyID)
+ fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID))
+}
+
+func main() {
+ // Load server key.
+ serverKey, err := libtrust.LoadKeyFile(privateKeyFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Generate server certificate.
+ selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert(
+ serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Load authorized client keys.
+ authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create CA pool using trusted client keys.
+ caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create TLS config, requiring client certificates.
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{
+ {
+ Certificate: [][]byte{selfSignedServerCert.Raw},
+ PrivateKey: serverKey.CryptoPrivateKey(),
+ Leaf: selfSignedServerCert,
+ },
+ },
+ ClientAuth: tls.RequireAndVerifyClientCert,
+ ClientCAs: caPool,
+ }
+
+ // Create HTTP server with simple request handler.
+ server := &http.Server{
+ Addr: serverAddress,
+ Handler: http.HandlerFunc(requestHandler),
+ }
+
+ // Listen and serve HTTPS using the libtrust TLS config.
+ listener, err := net.Listen("tcp", server.Addr)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tlsListener := tls.NewListener(listener, tlsConfig)
+ log.Fatal(server.Serve(tlsListener))
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
new file mode 100644
index 00000000..72b0fc36
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go
@@ -0,0 +1,50 @@
+package trustgraph
+
+import "github.com/docker/libtrust"
+
+// TrustGraph represents a graph of authorization mapping
+// public keys to nodes and grants between nodes.
+type TrustGraph interface {
+ // Verify returns whether the given public key is allowed to
+ // perform the given action on the given node according to the
+ // trust graph.
+ Verify(libtrust.PublicKey, string, uint16) (bool, error)
+
+ // GetGrants returns an array of all grant chains which are used to
+ // allow the requested permission.
+ GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
+}
+
+// Grant represents a transfer of permission from one part of the
+// trust graph to another. This is the only way to delegate
+// permission between two different subtrees in the graph.
+type Grant struct {
+ // Subject is the namespace being granted
+ Subject string
+
+ // Permission is a bitmap of permissions.
+ Permission uint16
+
+ // Grantee represents the node being granted
+ // a permission scope. The grantee can be
+ // either a namespace item or a key ID; namespace
+ // items always start with a '/'.
+ Grantee string
+
+ // statement represents the statement used to create
+ // this object.
+ statement *Statement
+}
+
+// Permissions
+// Read node 0x01 (can read node, no sub nodes)
+// Write node 0x02 (can write to node object, cannot create subnodes)
+// Read subtree 0x04 (delegates read to each sub node)
+// Write subtree 0x08 (delegates write to each sub node, including create on the subject)
+//
+// Permission shortcuts
+// ReadItem = 0x01
+// WriteItem = 0x03
+// ReadAccess = 0x07
+// WriteAccess = 0x0F
+// Delegate = 0x0F
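+//
+// Checking a permission is a bitwise test: a grant satisfies a request
+// when every requested bit is present in the granted bits. A minimal
+// sketch (illustrative only, not part of the original API):
+//
+// const (
+// ReadItem uint16 = 0x01
+// WriteItem uint16 = 0x03
+// ReadAccess uint16 = 0x07
+// WriteAccess uint16 = 0x0F
+// )
+//
+// granted, requested := WriteAccess, ReadItem
+// allowed := granted&requested == requested // true: 0x0F contains 0x01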
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
new file mode 100644
index 00000000..247bfa7a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go
@@ -0,0 +1,133 @@
+package trustgraph
+
+import (
+ "strings"
+
+ "github.com/docker/libtrust"
+)
+
+type grantNode struct {
+ grants []*Grant
+ children map[string]*grantNode
+}
+
+type memoryGraph struct {
+ roots map[string]*grantNode
+}
+
+func newGrantNode() *grantNode {
+ return &grantNode{
+ grants: []*Grant{},
+ children: map[string]*grantNode{},
+ }
+}
+
+// NewMemoryGraph returns a new in-memory trust graph created from
+// a static list of grants. This graph is immutable after creation
+// and any alterations should create a new instance.
+func NewMemoryGraph(grants []*Grant) TrustGraph {
+ roots := map[string]*grantNode{}
+ for _, grant := range grants {
+ parts := strings.Split(grant.Grantee, "/")
+ nodes := roots
+ var node *grantNode
+ var nodeOk bool
+ for _, part := range parts {
+ node, nodeOk = nodes[part]
+ if !nodeOk {
+ node = newGrantNode()
+ nodes[part] = node
+ }
+ if part != "" {
+ node.grants = append(node.grants, grant)
+ }
+ nodes = node.children
+ }
+ }
+ return &memoryGraph{roots}
+}
+
+func (g *memoryGraph) getGrants(name string) []*Grant {
+ nameParts := strings.Split(name, "/")
+ nodes := g.roots
+ var node *grantNode
+ var nodeOk bool
+ for _, part := range nameParts {
+ node, nodeOk = nodes[part]
+ if !nodeOk {
+ return nil
+ }
+ nodes = node.children
+ }
+ return node.grants
+}
+
+func isSubName(name, sub string) bool {
+ if strings.HasPrefix(name, sub) {
+ if len(name) == len(sub) || name[len(sub)] == '/' {
+ return true
+ }
+ }
+ return false
+}
+
+type walkFunc func(*Grant, []*Grant) bool
+
+func foundWalkFunc(*Grant, []*Grant) bool {
+ return true
+}
+
+func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool {
+ if visited == nil {
+ visited = map[*Grant]bool{}
+ }
+ grants := g.getGrants(start)
+ subGrants := make([]*Grant, 0, len(grants))
+ for _, grant := range grants {
+ if visited[grant] {
+ continue
+ }
+ visited[grant] = true
+ if grant.Permission&permission == permission {
+ if isSubName(target, grant.Subject) {
+ if f(grant, chain) {
+ return true
+ }
+ } else {
+ subGrants = append(subGrants, grant)
+ }
+ }
+ }
+ for _, grant := range subGrants {
+ var chainCopy []*Grant
+ if collect {
+ chainCopy = make([]*Grant, len(chain)+1)
+ copy(chainCopy, chain)
+ chainCopy[len(chainCopy)-1] = grant
+ } else {
+ chainCopy = nil
+ }
+
+ if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) {
+ return true
+ }
+ }
+ return false
+}
+
+func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) {
+ return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil
+}
+
+func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) {
+ grants := [][]*Grant{}
+ collect := func(grant *Grant, chain []*Grant) bool {
+ grantChain := make([]*Grant, len(chain)+1)
+ copy(grantChain, chain)
+ grantChain[len(grantChain)-1] = grant
+ grants = append(grants, grantChain)
+ return false
+ }
+ g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true)
+ return grants, nil
+}
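+
+// Building and querying a graph might look like the following
+// (illustrative sketch; error handling elided):
+//
+// key, _ := libtrust.GenerateECP256PrivateKey()
+// grants := []*Grant{
+// {Subject: "/acme", Permission: 0x0f, Grantee: key.KeyID()},
+// }
+// g := NewMemoryGraph(grants)
+// ok, _ := g.Verify(key.PublicKey(), "/acme/project", 0x01) // true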
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go
new file mode 100644
index 00000000..49fd0f3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go
@@ -0,0 +1,174 @@
+package trustgraph
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/docker/libtrust"
+)
+
+func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) {
+ grants := make([]*Grant, count)
+ keys := make([]libtrust.PrivateKey, count)
+ for i := 0; i < count; i++ {
+ pk, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ panic(err)
+ }
+ grant := &Grant{
+ Subject: fmt.Sprintf("/user-%d", i+1),
+ Permission: 0x0f,
+ Grantee: pk.KeyID(),
+ }
+ keys[i] = pk
+ grants[i] = grant
+ }
+ return grants, keys
+}
+
+func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) {
+ if ok, err := g.Verify(k, target, permission); err != nil {
+ t.Fatalf("Unexpected error during verification: %s", err)
+ } else if !ok {
+ t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target)
+ }
+}
+
+func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) {
+ if ok, err := g.Verify(k, target, permission); err != nil {
+ t.Fatalf("Unexpected error during verification: %s", err)
+ } else if ok {
+ t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target)
+ }
+}
+
+func TestVerify(t *testing.T) {
+ grants, keys := createTestKeysAndGrants(4)
+ extraGrants := make([]*Grant, 3)
+ extraGrants[0] = &Grant{
+ Subject: "/user-3",
+ Permission: 0x0f,
+ Grantee: "/user-2",
+ }
+ extraGrants[1] = &Grant{
+ Subject: "/user-3/sub-project",
+ Permission: 0x0f,
+ Grantee: "/user-4",
+ }
+ extraGrants[2] = &Grant{
+ Subject: "/user-4",
+ Permission: 0x07,
+ Grantee: "/user-1",
+ }
+ grants = append(grants, extraGrants...)
+
+ g := NewMemoryGraph(grants)
+
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f)
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f)
+ testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f)
+ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
+ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f)
+ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)
+
+ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f)
+ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f)
+ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f)
+ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f)
+ testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f)
+ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f)
+ testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
+}
+
+func TestCircularWalk(t *testing.T) {
+ grants, keys := createTestKeysAndGrants(3)
+ user1Grant := &Grant{
+ Subject: "/user-2",
+ Permission: 0x0f,
+ Grantee: "/user-1",
+ }
+ user2Grant := &Grant{
+ Subject: "/user-1",
+ Permission: 0x0f,
+ Grantee: "/user-2",
+ }
+ grants = append(grants, user1Grant, user2Grant)
+
+ g := NewMemoryGraph(grants)
+
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f)
+ testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
+
+ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f)
+ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
+}
+
+func assertGrantSame(t *testing.T, actual, expected *Grant) {
+ if actual != expected {
+ t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual)
+ }
+}
+
+func TestGetGrants(t *testing.T) {
+ grants, keys := createTestKeysAndGrants(5)
+ extraGrants := make([]*Grant, 4)
+ extraGrants[0] = &Grant{
+ Subject: "/user-3/friend-project",
+ Permission: 0x0f,
+ Grantee: "/user-2/friends",
+ }
+ extraGrants[1] = &Grant{
+ Subject: "/user-3/sub-project",
+ Permission: 0x0f,
+ Grantee: "/user-4",
+ }
+ extraGrants[2] = &Grant{
+ Subject: "/user-2/friends",
+ Permission: 0x0f,
+ Grantee: "/user-5/fun-project",
+ }
+ extraGrants[3] = &Grant{
+ Subject: "/user-5/fun-project",
+ Permission: 0x0f,
+ Grantee: "/user-1",
+ }
+ grants = append(grants, extraGrants...)
+
+ g := NewMemoryGraph(grants)
+
+ grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f)
+ if err != nil {
+ t.Fatalf("Error getting grants: %s", err)
+ }
+ if len(grantChains) != 1 {
+ t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
+ }
+ if len(grantChains[0]) != 2 {
+ t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0]))
+ }
+ assertGrantSame(t, grantChains[0][0], grants[3])
+ assertGrantSame(t, grantChains[0][1], extraGrants[1])
+
+ grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f)
+ if err != nil {
+ t.Fatalf("Error getting grants: %s", err)
+ }
+ if len(grantChains) != 1 {
+ t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains))
+ }
+ if len(grantChains[0]) != 4 {
+ t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0]))
+ }
+ assertGrantSame(t, grantChains[0][0], grants[0])
+ assertGrantSame(t, grantChains[0][1], extraGrants[3])
+ assertGrantSame(t, grantChains[0][2], extraGrants[2])
+ assertGrantSame(t, grantChains[0][3], extraGrants[0])
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go
new file mode 100644
index 00000000..7a74b553
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go
@@ -0,0 +1,227 @@
+package trustgraph
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/docker/libtrust"
+)
+
+type jsonGrant struct {
+ Subject string `json:"subject"`
+ Permission uint16 `json:"permission"`
+ Grantee string `json:"grantee"`
+}
+
+type jsonRevocation struct {
+ Subject string `json:"subject"`
+ Revocation uint16 `json:"revocation"`
+ Grantee string `json:"grantee"`
+}
+
+type jsonStatement struct {
+ Revocations []*jsonRevocation `json:"revocations"`
+ Grants []*jsonGrant `json:"grants"`
+ Expiration time.Time `json:"expiration"`
+ IssuedAt time.Time `json:"issuedAt"`
+}
+
+func (g *jsonGrant) Grant(statement *Statement) *Grant {
+ return &Grant{
+ Subject: g.Subject,
+ Permission: g.Permission,
+ Grantee: g.Grantee,
+ statement: statement,
+ }
+}
+
+// Statement represents a set of grants made from a verifiable
+// authority. A statement has an expiration associated with it
+// set by the authority.
+type Statement struct {
+ jsonStatement
+
+ signature *libtrust.JSONSignature
+}
+
+// IsExpired returns whether the statement has expired, allowing a
+// grace period of 10 seconds past the nominal expiration time.
+func (s *Statement) IsExpired() bool {
+ return s.Expiration.Before(time.Now().Add(-10 * time.Second))
+}
+
+// Bytes returns an indented json representation of the statement
+// in a byte array. This value can be written to a file or stream
+// without alteration.
+func (s *Statement) Bytes() ([]byte, error) {
+ return s.signature.PrettySignature("signatures")
+}
+
+// LoadStatement loads and verifies a statement from an input stream.
+func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ js, err := libtrust.ParsePrettySignature(b, "signatures")
+ if err != nil {
+ return nil, err
+ }
+ payload, err := js.Payload()
+ if err != nil {
+ return nil, err
+ }
+ var statement Statement
+ err = json.Unmarshal(payload, &statement.jsonStatement)
+ if err != nil {
+ return nil, err
+ }
+
+ if authority == nil {
+ _, err = js.Verify()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ _, err = js.VerifyChains(authority)
+ if err != nil {
+ return nil, err
+ }
+ }
+ statement.signature = js
+
+ return &statement, nil
+}
+
+// CreateStatement creates and signs a statement from a stream of grants
+// and revocations in a JSON array.
+func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+ var statement Statement
+ err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
+ if err != nil {
+ return nil, err
+ }
+ err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
+ if err != nil {
+ return nil, err
+ }
+ statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
+ statement.jsonStatement.IssuedAt = time.Now().UTC()
+
+ b, err := json.MarshalIndent(&statement.jsonStatement, "", " ")
+ if err != nil {
+ return nil, err
+ }
+
+ statement.signature, err = libtrust.NewJSONSignature(b)
+ if err != nil {
+ return nil, err
+ }
+ err = statement.signature.SignWithChain(key, chain)
+ if err != nil {
+ return nil, err
+ }
+
+ return &statement, nil
+}
+
+type statementList []*Statement
+
+func (s statementList) Len() int {
+ return len(s)
+}
+
+func (s statementList) Less(i, j int) bool {
+ return s[i].IssuedAt.Before(s[j].IssuedAt)
+}
+
+func (s statementList) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// CollapseStatements returns a single list of grants from the valid
+// statements, with revocations applied, as well as the time when the
+// next statement will expire.
+func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
+ sorted := make(statementList, 0, len(statements))
+ for _, statement := range statements {
+ if useExpired || !statement.IsExpired() {
+ sorted = append(sorted, statement)
+ }
+ }
+ sort.Sort(sorted)
+
+ var minExpired time.Time
+ var grantCount int
+ roots := map[string]*grantNode{}
+ for i, statement := range sorted {
+ if statement.Expiration.Before(minExpired) || i == 0 {
+ minExpired = statement.Expiration
+ }
+ for _, grant := range statement.Grants {
+ parts := strings.Split(grant.Grantee, "/")
+ nodes := roots
+ g := grant.Grant(statement)
+ grantCount++
+
+ for _, part := range parts {
+ node, nodeOk := nodes[part]
+ if !nodeOk {
+ node = newGrantNode()
+ nodes[part] = node
+ }
+ node.grants = append(node.grants, g)
+ nodes = node.children
+ }
+ }
+
+ for _, revocation := range statement.Revocations {
+ parts := strings.Split(revocation.Grantee, "/")
+ nodes := roots
+
+ var node *grantNode
+ var nodeOk bool
+ for _, part := range parts {
+ node, nodeOk = nodes[part]
+ if !nodeOk {
+ break
+ }
+ nodes = node.children
+ }
+ if node != nil {
+ for _, grant := range node.grants {
+ if isSubName(grant.Subject, revocation.Subject) {
+ grant.Permission = grant.Permission &^ revocation.Revocation
+ }
+ }
+ }
+ }
+ }
+
+ retGrants := make([]*Grant, 0, grantCount)
+ for _, rootNodes := range roots {
+ retGrants = append(retGrants, rootNodes.grants...)
+ }
+
+ return retGrants, minExpired, nil
+}
+
+// FilterStatements returns the set of statements from which the given grants originated.
+func FilterStatements(grants []*Grant) ([]*Statement, error) {
+ statements := map[*Statement]bool{}
+ for _, grant := range grants {
+ if grant.statement != nil {
+ statements[grant.statement] = true
+ }
+ }
+ retStatements := make([]*Statement, len(statements))
+ var i int
+ for statement := range statements {
+ retStatements[i] = statement
+ i++
+ }
+ return retStatements, nil
+}
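+
+// End to end, statements are typically created from JSON, re-verified on
+// load, and collapsed into a flat grant list (illustrative sketch; the
+// readers, key, chain and CA pool are assumptions, error handling elided):
+//
+// statement, _ := CreateStatement(grantsJSON, revocationsJSON, time.Hour, key, chain)
+// b, _ := statement.Bytes()
+// verified, _ := LoadStatement(bytes.NewReader(b), caPool)
+// grants, _, _ := CollapseStatements([]*Statement{verified}, false)
+// graph := NewMemoryGraph(grants)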
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go
new file mode 100644
index 00000000..e5094686
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go
@@ -0,0 +1,417 @@
+package trustgraph
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/docker/libtrust"
+ "github.com/docker/libtrust/testutil"
+)
+
+const testStatementExpiration = time.Hour * 5
+
+func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
+ var statement Statement
+
+ statement.Grants = make([]*jsonGrant, len(grants))
+ for i, grant := range grants {
+ statement.Grants[i] = &jsonGrant{
+ Subject: grant.Subject,
+ Permission: grant.Permission,
+ Grantee: grant.Grantee,
+ }
+ }
+ statement.IssuedAt = time.Now()
+ statement.Expiration = time.Now().Add(testStatementExpiration)
+ statement.Revocations = make([]*jsonRevocation, 0)
+
+ marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ")
+ if err != nil {
+ return nil, err
+ }
+
+ sig, err := libtrust.NewJSONSignature(marshalled)
+ if err != nil {
+ return nil, err
+ }
+ err = sig.SignWithChain(key, chain)
+ if err != nil {
+ return nil, err
+ }
+ statement.signature = sig
+
+ return &statement, nil
+}
+
+func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) {
+ caKey, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generating key: %s", err)
+ }
+ ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey())
+ if err != nil {
+ t.Fatalf("Error generating ca: %s", err)
+ }
+
+ parent := ca
+ parentKey := caKey
+ chain := make([]*x509.Certificate, chainLen)
+ for i := chainLen - 1; i > 0; i-- {
+ intermediatekey, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generate key: %s", err)
+ }
+ chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+ if err != nil {
+ t.Fatalf("Error generating intermdiate certificate: %s", err)
+ }
+ parent = chain[i]
+ parentKey = intermediatekey
+ }
+ trustKey, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("Error generate key: %s", err)
+ }
+ chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent)
+ if err != nil {
+ t.Fatalf("Error generate trust cert: %s", err)
+ }
+
+ caPool := x509.NewCertPool()
+ caPool.AddCert(ca)
+
+ return trustKey, caPool, chain
+}
+
+func TestLoadStatement(t *testing.T) {
+ grantCount := 4
+ grants, _ := createTestKeysAndGrants(grantCount)
+
+ trustKey, caPool, chain := generateTrustChain(t, 6)
+
+ statement, err := generateStatement(grants, trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+
+ statementBytes, err := statement.Bytes()
+ if err != nil {
+ t.Fatalf("Error getting statement bytes: %s", err)
+ }
+
+ s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool)
+ if err != nil {
+ t.Fatalf("Error loading statement: %s", err)
+ }
+ if len(s2.Grants) != grantCount {
+ t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+ }
+
+ pool := x509.NewCertPool()
+ _, err = LoadStatement(bytes.NewReader(statementBytes), pool)
+ if err == nil {
+ t.Fatalf("No error thrown verifying without an authority")
+ } else if _, ok := err.(x509.UnknownAuthorityError); !ok {
+ t.Fatalf("Unexpected error verifying without authority: %s", err)
+ }
+
+ s2, err = LoadStatement(bytes.NewReader(statementBytes), nil)
+ if err != nil {
+ t.Fatalf("Error loading statement: %s", err)
+ }
+ if len(s2.Grants) != grantCount {
+ t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants))
+ }
+
+ badData := make([]byte, len(statementBytes))
+ copy(badData, statementBytes)
+ badData[0] = '['
+ _, err = LoadStatement(bytes.NewReader(badData), nil)
+ if err == nil {
+ t.Fatalf("No error thrown parsing bad json")
+ }
+
+ alteredData := make([]byte, len(statementBytes))
+ copy(alteredData, statementBytes)
+ alteredData[30] = '0'
+ _, err = LoadStatement(bytes.NewReader(alteredData), nil)
+ if err == nil {
+ t.Fatalf("No error thrown from bad data")
+ }
+}
+
+func TestCollapseGrants(t *testing.T) {
+ grantCount := 8
+ grants, keys := createTestKeysAndGrants(grantCount)
+ linkGrants := make([]*Grant, 4)
+ linkGrants[0] = &Grant{
+ Subject: "/user-3",
+ Permission: 0x0f,
+ Grantee: "/user-2",
+ }
+ linkGrants[1] = &Grant{
+ Subject: "/user-3/sub-project",
+ Permission: 0x0f,
+ Grantee: "/user-4",
+ }
+ linkGrants[2] = &Grant{
+ Subject: "/user-6",
+ Permission: 0x0f,
+ Grantee: "/user-7",
+ }
+ linkGrants[3] = &Grant{
+ Subject: "/user-6/sub-project/specific-app",
+ Permission: 0x0f,
+ Grantee: "/user-5",
+ }
+ trustKey, pool, chain := generateTrustChain(t, 3)
+
+ statements := make([]*Statement, 3)
+ var err error
+ statements[0], err = generateStatement(grants[0:4], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[1], err = generateStatement(grants[4:], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[2], err = generateStatement(linkGrants, trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+
+ statementsCopy := make([]*Statement, len(statements))
+ for i, statement := range statements {
+ b, err := statement.Bytes()
+ if err != nil {
+ t.Fatalf("Error getting statement bytes: %s", err)
+ }
+ verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool)
+ if err != nil {
+ t.Fatalf("Error loading statement: %s", err)
+ }
+ // Force sort by reversing order
+ statementsCopy[len(statementsCopy)-i-1] = verifiedStatement
+ }
+ statements = statementsCopy
+
+ collapsedGrants, expiration, err := CollapseStatements(statements, false)
+ if err != nil {
+ t.Fatalf("Error collapsing statements: %s", err)
+ }
+ if len(collapsedGrants) != 12 {
+ t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
+ }
+ if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
+ t.Fatalf("Unexpected expiration time: %s", expiration.String())
+ }
+ g := NewMemoryGraph(collapsedGrants)
+
+ testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
+ testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f)
+ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f)
+ testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f)
+ testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f)
+ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f)
+ testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f)
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f)
+ testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f)
+ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f)
+ testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
+ testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f)
+
+ testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f)
+ testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f)
+ testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f)
+
+ // Add revocation grant
+ statements = append(statements, &Statement{
+ jsonStatement{
+ IssuedAt: time.Now(),
+ Expiration: time.Now().Add(testStatementExpiration),
+ Grants: []*jsonGrant{},
+ Revocations: []*jsonRevocation{
+ &jsonRevocation{
+ Subject: "/user-1",
+ Revocation: 0x0f,
+ Grantee: keys[0].KeyID(),
+ },
+ &jsonRevocation{
+ Subject: "/user-2",
+ Revocation: 0x08,
+ Grantee: keys[1].KeyID(),
+ },
+ &jsonRevocation{
+ Subject: "/user-6",
+ Revocation: 0x0f,
+ Grantee: "/user-7",
+ },
+ &jsonRevocation{
+ Subject: "/user-9",
+ Revocation: 0x0f,
+ Grantee: "/user-10",
+ },
+ },
+ },
+ nil,
+ })
+
+ collapsedGrants, expiration, err = CollapseStatements(statements, false)
+ if err != nil {
+ t.Fatalf("Error collapsing statements: %s", err)
+ }
+ if len(collapsedGrants) != 12 {
+ t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants))
+ }
+ if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) {
+ t.Fatalf("Unexpected expiration time: %s", expiration.String())
+ }
+ g = NewMemoryGraph(collapsedGrants)
+
+ testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f)
+ testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f)
+ testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f)
+
+ testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07)
+}
+
+func TestFilterStatements(t *testing.T) {
+ grantCount := 8
+ grants, keys := createTestKeysAndGrants(grantCount)
+ linkGrants := make([]*Grant, 3)
+ linkGrants[0] = &Grant{
+ Subject: "/user-3",
+ Permission: 0x0f,
+ Grantee: "/user-2",
+ }
+ linkGrants[1] = &Grant{
+ Subject: "/user-5",
+ Permission: 0x0f,
+ Grantee: "/user-4",
+ }
+ linkGrants[2] = &Grant{
+ Subject: "/user-7",
+ Permission: 0x0f,
+ Grantee: "/user-6",
+ }
+
+ trustKey, _, chain := generateTrustChain(t, 3)
+
+ statements := make([]*Statement, 5)
+ var err error
+ statements[0], err = generateStatement(grants[0:2], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[1], err = generateStatement(grants[2:4], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[2], err = generateStatement(grants[4:6], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[3], err = generateStatement(grants[6:], trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ statements[4], err = generateStatement(linkGrants, trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error generating statement: %s", err)
+ }
+ collapsed, _, err := CollapseStatements(statements, false)
+ if err != nil {
+ t.Fatalf("Error collapsing grants: %s", err)
+ }
+
+ // Filter 1, all 5 statements
+ filter1, err := FilterStatements(collapsed)
+ if err != nil {
+ t.Fatalf("Error filtering statements: %s", err)
+ }
+ if len(filter1) != 5 {
+ t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1))
+ }
+
+ // Filter 2, one statement
+ filter2, err := FilterStatements([]*Grant{collapsed[0]})
+ if err != nil {
+ t.Fatalf("Error filtering statements: %s", err)
+ }
+ if len(filter2) != 1 {
+ t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2))
+ }
+
+ // Filter 3, 2 statements, from graph lookup
+ g := NewMemoryGraph(collapsed)
+ lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f)
+ if err != nil {
+ t.Fatalf("Error looking up grants: %s", err)
+ }
+ if len(lookupGrants) != 1 {
+ t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants))
+ }
+ if len(lookupGrants[0]) != 2 {
+ t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants))
+ }
+ filter3, err := FilterStatements(lookupGrants[0])
+ if err != nil {
+ t.Fatalf("Error filtering statements: %s", err)
+ }
+ if len(filter3) != 2 {
+ t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3))
+ }
+}
+
+func TestCreateStatement(t *testing.T) {
+ grantJSON := bytes.NewReader([]byte(`[
+ {
+ "subject": "/user-2",
+ "permission": 15,
+ "grantee": "/user-1"
+ },
+ {
+ "subject": "/user-7",
+ "permission": 1,
+ "grantee": "/user-9"
+ },
+ {
+ "subject": "/user-3",
+ "permission": 15,
+ "grantee": "/user-2"
+ }
+]`))
+ revocationJSON := bytes.NewReader([]byte(`[
+ {
+ "subject": "user-8",
+ "revocation": 12,
+ "grantee": "user-9"
+ }
+]`))
+
+ trustKey, pool, chain := generateTrustChain(t, 3)
+
+ statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain)
+ if err != nil {
+ t.Fatalf("Error creating statement: %s", err)
+ }
+
+ b, err := statement.Bytes()
+ if err != nil {
+ t.Fatalf("Error retrieving bytes: %s", err)
+ }
+
+ verified, err := LoadStatement(bytes.NewReader(b), pool)
+ if err != nil {
+ t.Fatalf("Error loading statement: %s", err)
+ }
+
+ if len(verified.Grants) != 3 {
+ t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants))
+ }
+
+ if len(verified.Revocations) != 1 {
+ t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations))
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go
new file mode 100644
index 00000000..45dc3e18
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/util.go
@@ -0,0 +1,361 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/elliptic"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path,
+// generating and saving a new key there (plus its public counterpart)
+// if no key file exists yet.
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ return nil, err
+ }
+
+ trustKey, err := LoadKeyFile(trustKeyPath)
+ if err == ErrKeyFileDoesNotExist {
+ trustKey, err = GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("error generating key: %s", err)
+ }
+
+ if err := SaveKey(trustKeyPath, trustKey); err != nil {
+ return nil, fmt.Errorf("error saving key file: %s", err)
+ }
+
+ dir, file := filepath.Split(trustKeyPath)
+ if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+ return nil, fmt.Errorf("error saving public key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("error loading key file: %s", err)
+ }
+ return trustKey, nil
+}
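+
+// A typical call site might look like the following (illustrative
+// sketch; the path is an assumption, not part of this package):
+//
+// trustKey, err := libtrust.LoadOrCreateTrustKey("/etc/docker/key.json")
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Println("key ID:", trustKey.KeyID())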
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use
+// identity-based authentication for the specified dockerUrl, using the given
+// rootConfigPath and the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+ knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+ u, err := url.Parse(dockerUrl)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse machine url")
+ }
+
+ if u.Scheme == "unix" {
+ return nil, nil
+ }
+
+ addr := u.Host
+ proto := "tcp"
+
+ trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load trust key: %s", err)
+ }
+
+ knownHosts, err := LoadKeySetFile(knownHostsPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+ }
+
+ allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+ if err != nil {
+ return nil, fmt.Errorf("error filtering hosts: %s", err)
+ }
+
+ certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+ if err != nil {
+ return nil, fmt.Errorf("Could not create CA pool: %s", err)
+ }
+
+ tlsConfig.ServerName = serverName
+ tlsConfig.RootCAs = certPool
+
+ x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ tlsConfig.InsecureSkipVerify = true
+
+ testConn, err := tls.Dial(proto, addr, tlsConfig)
+ if err != nil {
+ return nil, fmt.Errorf("tls Handshake error: %s", err)
+ }
+
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ CurrentTime: time.Now(),
+ DNSName: tlsConfig.ServerName,
+ Intermediates: x509.NewCertPool(),
+ }
+
+ certs := testConn.ConnectionState().PeerCertificates
+ for i, cert := range certs {
+ if i == 0 {
+ continue
+ }
+ opts.Intermediates.AddCert(cert)
+ }
+
+ if _, err := certs[0].Verify(opts); err != nil {
+ if _, ok := err.(x509.UnknownAuthorityError); ok {
+ if trustUnknownHosts {
+ pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+ }
+
+ pubKey.AddExtendedField("hosts", []string{addr})
+
+ if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+ return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+ }
+ } else {
+ return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+ }
+ } else {
+ // Verification failures other than an unknown authority are fatal.
+ return nil, err
+ }
+ }
+
+ testConn.Close()
+ tlsConfig.InsecureSkipVerify = false
+
+ return tlsConfig, nil
+}
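+
+// A client might obtain its TLS config like this (illustrative sketch;
+// the URL, paths and server name are assumptions):
+//
+// tlsConfig, err := libtrust.NewIdentityAuthTLSClientConfig(
+// "tcp://docker.example.com:2376", true, "/root/.docker", "docker.example.com",
+// )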
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+ switch len(s) % 4 {
+ case 0:
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ default:
+ return nil, errors.New("illegal base64url string")
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
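+
+// For example, "AQAB" (length divisible by 4) decodes unchanged, while
+// "AQ" is padded to "AQ==" and "AQE" to "AQE=" before decoding; a length
+// of 4k+1 can never occur in valid base64url input, hence the error.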
+
+func keyIDEncode(b []byte) string {
+ s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
+ var buf bytes.Buffer
+ var i int
+ for i = 0; i < len(s)/4-1; i++ {
+ start := i * 4
+ end := start + 4
+ buf.WriteString(s[start:end] + ":")
+ }
+ buf.WriteString(s[i*4:])
+ return buf.String()
+}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+ // Generate and return a 'libtrust' fingerprint of the public key.
+ // For an RSA key this should be:
+ // SHA256(DER encoded ASN1)
+ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+ if err != nil {
+ return ""
+ }
+ hasher := crypto.SHA256.New()
+ hasher.Write(derBytes)
+ return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func stringFromMap(m map[string]interface{}, key string) (string, error) {
+ val, ok := m[key]
+ if !ok {
+ return "", fmt.Errorf("%q value not specified", key)
+ }
+
+ str, ok := val.(string)
+ if !ok {
+ return "", fmt.Errorf("%q value must be a string", key)
+ }
+ delete(m, key)
+
+ return str, nil
+}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+ cBytes, err := joseBase64UrlDecode(cB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ cByteLength := len(cBytes)
+ if cByteLength != curveByteLen {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+ }
+ return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ dBytes, err := joseBase64UrlDecode(dB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := curve.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ dByteLength := len(dBytes)
+
+ if dByteLength != octetLength {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+ }
+
+ return new(big.Int).SetBytes(dBytes), nil
+}
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+ nBytes, err := joseBase64UrlDecode(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(nBytes), nil
+}
+
+func serializeRSAPublicExponentParam(e int) []byte {
+ // We MUST use the minimum number of octets to represent E.
+ // E is supposed to be 65537 for performance and security reasons
+ // and is what golang's rsa package generates, but it might be
+ // different if imported from some other generator.
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(e))
+ var i int
+ // Strip any leading zero octets to keep the minimal representation.
+ for i = 0; i < len(buf); i++ {
+ if buf[i] != 0 {
+ break
+ }
+ }
+ return buf[i:]
+}
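+
+// For the common exponent 65537 (0x00010001) the big-endian buffer is
+// [0x00, 0x01, 0x00, 0x01]; the leading zero octet is stripped, leaving
+// the minimal three-octet form [0x01, 0x00, 0x01].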
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+ eBytes, err := joseBase64UrlDecode(eB64Url)
+ if err != nil {
+ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ // Only the minimum number of bytes were used to represent E, but
+ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+ // to add zero padding if necessary.
+ byteLen := len(eBytes)
+ if byteLen > 4 {
+ return 0, fmt.Errorf("invalid public exponent: %d octets, expected at most 4", byteLen)
+ }
+ buf := make([]byte, 4-byteLen, 4)
+ eBytes = append(buf, eBytes...)
+
+ return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+ b64Url, err := stringFromMap(m, key)
+ if err != nil {
+ return nil, err
+ }
+
+ paramBytes, err := joseBase64UrlDecode(b64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+ pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+ for k, v := range headers {
+ switch val := v.(type) {
+ case string:
+ pemBlock.Headers[k] = val
+ case []string:
+ if k == "hosts" {
+ pemBlock.Headers[k] = strings.Join(val, ",")
+ } else {
+ // Only the "hosts" header may carry a []string value.
+ return nil, fmt.Errorf("unable to encode PEM header %q: unsupported []string value", k)
+ }
+ default:
+ return nil, fmt.Errorf("unable to encode PEM header %q: unsupported type %T", k, v)
+ }
+ }
+
+ return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+ }
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ addPEMHeadersToKey(pemBlock, pubKey)
+
+ return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+ for key, value := range pemBlock.Headers {
+ var safeVal interface{}
+ if key == "hosts" {
+ safeVal = strings.Split(value, ",")
+ } else {
+ safeVal = value
+ }
+ pubKey.AddExtendedField(key, safeVal)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go
new file mode 100644
index 00000000..ee54f5b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go
@@ -0,0 +1,23 @@
+package libtrust
+
+import (
+ "encoding/pem"
+ "reflect"
+ "testing"
+)
+
+func TestAddPEMHeadersToKey(t *testing.T) {
+ pk := &rsaPublicKey{nil, map[string]interface{}{}}
+ blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}}
+ addPEMHeadersToKey(blk, pk)
+
+ val := pk.GetExtendedField("hosts")
+ hosts, ok := val.([]string)
+ if !ok {
+ t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val))
+ }
+ expected := []string{"localhost", "127.0.0.1"}
+ if !reflect.DeepEqual(hosts, expected) {
+ t.Errorf("hosts(%v), expected %v", hosts, expected)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml
new file mode 100644
index 00000000..d87d4657
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+ - 1.0
+ - 1.1
+ - 1.2
+ - tip
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE
new file mode 100644
index 00000000..0e5fb872
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/gorilla/context/README.md
new file mode 100644
index 00000000..c60a31b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/README.md
@@ -0,0 +1,7 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go
new file mode 100644
index 00000000..81cb128b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ mutex sync.RWMutex
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+ mutex.Lock()
+ if data[r] == nil {
+ data[r] = make(map[interface{}]interface{})
+ datat[r] = time.Now().Unix()
+ }
+ data[r][key] = val
+ mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+ mutex.RLock()
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
+ mutex.RUnlock()
+ return value
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetOk returns the stored value for a given key in a given request, plus
+// a boolean reporting whether the key was present, mirroring the two-value
+// form of a map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+ mutex.RLock()
+ if _, ok := data[r]; ok {
+ value, ok := data[r][key]
+ mutex.RUnlock()
+ return value, ok
+ }
+ mutex.RUnlock()
+ return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is
+// returned for requests that have no stored values.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+ mutex.RLock()
+ if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result
+ }
+ mutex.RUnlock()
+ return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+ mutex.RLock()
+ context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
+ mutex.RUnlock()
+ return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+ mutex.Lock()
+ if data[r] != nil {
+ delete(data[r], key)
+ }
+ mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+ mutex.Lock()
+ clear(r)
+ mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+ delete(data, r)
+ delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only intended as a safety net: if context cleaning was not
+// set up properly, some request data can be kept forever, consuming an
+// increasing amount of memory. If this is detected, Purge() must be
+// called periodically until the problem is fixed.
+func Purge(maxAge int) int {
+ mutex.Lock()
+ count := 0
+ if maxAge <= 0 {
+ count = len(data)
+ data = make(map[*http.Request]map[interface{}]interface{})
+ datat = make(map[*http.Request]int64)
+ } else {
+ min := time.Now().Unix() - int64(maxAge)
+ for r := range data {
+ if datat[r] < min {
+ clear(r)
+ count++
+ }
+ }
+ }
+ mutex.Unlock()
+ return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer Clear(r)
+ h.ServeHTTP(w, r)
+ })
+}
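+
+// A server would typically install ClearHandler at the top of its
+// handler chain (illustrative sketch):
+//
+// http.ListenAndServe(":8080", ClearHandler(http.DefaultServeMux))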
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go
new file mode 100644
index 00000000..6ada8ec3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go
@@ -0,0 +1,161 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+ "net/http"
+ "testing"
+)
+
+type keyType int
+
+const (
+ key1 keyType = iota
+ key2
+)
+
+func TestContext(t *testing.T) {
+ assertEqual := func(val interface{}, exp interface{}) {
+ if val != exp {
+ t.Errorf("Expected %v, got %v.", exp, val)
+ }
+ }
+
+ r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+ emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+
+ // Get()
+ assertEqual(Get(r, key1), nil)
+
+ // Set()
+ Set(r, key1, "1")
+ assertEqual(Get(r, key1), "1")
+ assertEqual(len(data[r]), 1)
+
+ Set(r, key2, "2")
+ assertEqual(Get(r, key2), "2")
+ assertEqual(len(data[r]), 2)
+
+ // GetOk()
+ value, ok := GetOk(r, key1)
+ assertEqual(value, "1")
+ assertEqual(ok, true)
+
+ value, ok = GetOk(r, "not exists")
+ assertEqual(value, nil)
+ assertEqual(ok, false)
+
+ Set(r, "nil value", nil)
+ value, ok = GetOk(r, "nil value")
+ assertEqual(value, nil)
+ assertEqual(ok, true)
+
+ // GetAll()
+ values := GetAll(r)
+ assertEqual(len(values), 3)
+
+ // GetAll() for empty request
+ values = GetAll(emptyR)
+ if values != nil {
+ t.Error("GetAll didn't return nil value for invalid request")
+ }
+
+ // GetAllOk()
+ values, ok = GetAllOk(r)
+ assertEqual(len(values), 3)
+ assertEqual(ok, true)
+
+ // GetAllOk() for empty request
+ values, ok = GetAllOk(emptyR)
+ assertEqual(len(values), 0)
+ assertEqual(ok, false)
+
+ // Delete()
+ Delete(r, key1)
+ assertEqual(Get(r, key1), nil)
+ assertEqual(len(data[r]), 2)
+
+ Delete(r, key2)
+ assertEqual(Get(r, key2), nil)
+ assertEqual(len(data[r]), 1)
+
+ // Clear()
+ Clear(r)
+ assertEqual(len(data), 0)
+}
+
+func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) {
+ <-wait
+ for i := 0; i < iterations; i++ {
+ Get(r, key)
+ }
+ done <- struct{}{}
+
+}
+
+func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) {
+ <-wait
+ for i := 0; i < iterations; i++ {
+ Set(r, key, value)
+ }
+ done <- struct{}{}
+}
+
+func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) {
+ b.StopTimer()
+ r, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+ done := make(chan struct{})
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ wait := make(chan struct{})
+
+ for i := 0; i < numReaders; i++ {
+ go parallelReader(r, "test", iterations, wait, done)
+ }
+
+ for i := 0; i < numWriters; i++ {
+ go parallelWriter(r, "test", "123", iterations, wait, done)
+ }
+
+ close(wait)
+
+ for i := 0; i < numReaders+numWriters; i++ {
+ <-done
+ }
+ }
+}
+
+func BenchmarkMutexSameReadWrite1(b *testing.B) {
+ benchmarkMutex(b, 1, 1, 32)
+}
+func BenchmarkMutexSameReadWrite2(b *testing.B) {
+ benchmarkMutex(b, 2, 2, 32)
+}
+func BenchmarkMutexSameReadWrite4(b *testing.B) {
+ benchmarkMutex(b, 4, 4, 32)
+}
+func BenchmarkMutex1(b *testing.B) {
+ benchmarkMutex(b, 2, 8, 32)
+}
+func BenchmarkMutex2(b *testing.B) {
+ benchmarkMutex(b, 16, 4, 64)
+}
+func BenchmarkMutex3(b *testing.B) {
+ benchmarkMutex(b, 1, 2, 128)
+}
+func BenchmarkMutex4(b *testing.B) {
+ benchmarkMutex(b, 128, 32, 256)
+}
+func BenchmarkMutex5(b *testing.B) {
+ benchmarkMutex(b, 1024, 2048, 64)
+}
+func BenchmarkMutex6(b *testing.B) {
+ benchmarkMutex(b, 2048, 1024, 512)
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
new file mode 100644
index 00000000..73c74003
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/context/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+ http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+ package foo
+
+ import (
+ "github.com/gorilla/context"
+ )
+
+ type key int
+
+ const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+ context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+ func MyHandler(w http.ResponseWriter, r *http.Request) {
+ // val is "bar".
+ val := context.Get(r, foo.MyKey)
+
+ // returns ("bar", true)
+ val, ok := context.GetOk(r, foo.MyKey)
+ // ...
+ }
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+ type key int
+
+ const mykey key = 0
+
+ // GetMyKey returns a value for this package from the request values.
+ func GetMyKey(r *http.Request) SomeType {
+ if rv := context.Get(r, mykey); rv != nil {
+ return rv.(SomeType)
+ }
+ return nil
+ }
+
+ // SetMyKey sets a value for this package in the request values.
+ func SetMyKey(r *http.Request, val SomeType) {
+ context.Set(r, mykey, val)
+ }
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+ context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context
+manually.
+*/
+package context
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml
new file mode 100644
index 00000000..825dc3f2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+ - 1.0
+ - 1.1
+ - 1.2
+ - 1.3
+ - tip
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/README.md b/Godeps/_workspace/src/github.com/gorilla/handlers/README.md
new file mode 100644
index 00000000..9be93705
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/README.md
@@ -0,0 +1,6 @@
+gorilla/handlers
+================
+[![Build Status](https://travis-ci.org/gorilla/handlers.png?branch=master)](https://travis-ci.org/gorilla/handlers)
+
+*Warning:* This package is a work in progress and the APIs are subject to change.
+Consider this a v0 project.
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go b/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go
new file mode 100644
index 00000000..81202bbc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go
@@ -0,0 +1,67 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+)
+
+type compressResponseWriter struct {
+ io.Writer
+ http.ResponseWriter
+}
+
+func (w *compressResponseWriter) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *compressResponseWriter) Write(b []byte) (int, error) {
+ h := w.ResponseWriter.Header()
+ if h.Get("Content-Type") == "" {
+ h.Set("Content-Type", http.DetectContentType(b))
+ }
+
+ return w.Writer.Write(b)
+}
+
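+// CompressHandler wraps h, compressing responses with gzip or deflate for
+// clients that advertise support via the Accept-Encoding header, and sets
+// the Content-Encoding and Vary headers accordingly.
+//
+// A minimal usage sketch (appHandler is an illustrative placeholder):
+//
+// http.ListenAndServe(":8080", CompressHandler(appHandler))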
+func CompressHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ L:
+ for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") {
+ switch strings.TrimSpace(enc) {
+ case "gzip":
+ w.Header().Set("Content-Encoding", "gzip")
+ w.Header().Add("Vary", "Accept-Encoding")
+
+ gw := gzip.NewWriter(w)
+ defer gw.Close()
+
+ w = &compressResponseWriter{
+ Writer: gw,
+ ResponseWriter: w,
+ }
+ break L
+ case "deflate":
+ w.Header().Set("Content-Encoding", "deflate")
+ w.Header().Add("Vary", "Accept-Encoding")
+
+ fw, _ := flate.NewWriter(w, flate.DefaultCompression)
+ defer fw.Close()
+
+ w = &compressResponseWriter{
+ Writer: fw,
+ ResponseWriter: w,
+ }
+ break L
+ }
+ }
+
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go
new file mode 100644
index 00000000..2661b399
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go
@@ -0,0 +1,65 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func compressedRequest(w *httptest.ResponseRecorder, compression string) {
+ CompressHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ for i := 0; i < 1024; i++ {
+ io.WriteString(w, "Gorilla!\n")
+ }
+ })).ServeHTTP(w, &http.Request{
+ Method: "GET",
+ Header: http.Header{
+ "Accept-Encoding": []string{compression},
+ },
+ })
+}
+
+func TestCompressHandlerGzip(t *testing.T) {
+ w := httptest.NewRecorder()
+ compressedRequest(w, "gzip")
+ if w.HeaderMap.Get("Content-Encoding") != "gzip" {
+ t.Fatalf("wrong content encoding, got %s want %s", w.HeaderMap.Get("Content-Encoding"), "gzip")
+ }
+ if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
+ t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
+ }
+ if w.Body.Len() != 72 {
+ t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 72)
+ }
+}
+
+func TestCompressHandlerDeflate(t *testing.T) {
+ w := httptest.NewRecorder()
+ compressedRequest(w, "deflate")
+ if w.HeaderMap.Get("Content-Encoding") != "deflate" {
+ t.Fatalf("wrong content encoding, got %s want %s", w.HeaderMap.Get("Content-Encoding"), "deflate")
+ }
+ if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
+ t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
+ }
+ if w.Body.Len() != 54 {
+ t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 54)
+ }
+}
+
+func TestCompressHandlerGzipDeflate(t *testing.T) {
+ w := httptest.NewRecorder()
+ compressedRequest(w, "gzip, deflate ")
+ if w.HeaderMap.Get("Content-Encoding") != "gzip" {
+ t.Fatalf("wrong content encoding, got %s want %s", w.HeaderMap.Get("Content-Encoding"), "gzip")
+ }
+ if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
+ t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go
new file mode 100644
index 00000000..23a738da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go
@@ -0,0 +1,356 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package handlers is a collection of handlers for use with Go's net/http package.
+*/
+package handlers
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// MethodHandler is an http.Handler that dispatches to the handler whose key in the
+// MethodHandler's map matches the name of the HTTP request's method, e.g. GET.
+//
+// If the request's method is OPTIONS and OPTIONS is not a key in the map then the handler
+// responds with a status of 200 and sets the Allow header to a comma-separated list of
+// available methods.
+//
+// If the request's method doesn't match any of its keys the handler responds with
+// a status of 405 (Method Not Allowed) and sets the Allow header to a comma-separated
+// list of available methods.
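+//
+// A minimal usage sketch (getHandler and postHandler are illustrative
+// placeholders for http.Handler values):
+//
+// http.Handle("/resource", MethodHandler{
+// "GET": getHandler,
+// "POST": postHandler,
+// })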
+type MethodHandler map[string]http.Handler
+
+func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if handler, ok := h[req.Method]; ok {
+ handler.ServeHTTP(w, req)
+ } else {
+ allow := []string{}
+ for k := range h {
+ allow = append(allow, k)
+ }
+ sort.Strings(allow)
+ w.Header().Set("Allow", strings.Join(allow, ", "))
+ if req.Method == "OPTIONS" {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ }
+ }
+}
+
+// loggingHandler is the http.Handler implementation returned by LoggingHandler.
+type loggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+}
+
+// combinedLoggingHandler is the http.Handler implementation returned by CombinedLoggingHandler.
+type combinedLoggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+}
+
+func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ var logger loggingResponseWriter
+ if _, ok := w.(http.Hijacker); ok {
+ logger = &hijackLogger{responseLogger: responseLogger{w: w}}
+ } else {
+ logger = &responseLogger{w: w}
+ }
+ url := *req.URL
+ h.handler.ServeHTTP(logger, req)
+ writeLog(h.writer, req, url, t, logger.Status(), logger.Size())
+}
+
+func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ var logger loggingResponseWriter
+ if _, ok := w.(http.Hijacker); ok {
+ logger = &hijackLogger{responseLogger: responseLogger{w: w}}
+ } else {
+ logger = &responseLogger{w: w}
+ }
+ url := *req.URL
+ h.handler.ServeHTTP(logger, req)
+ writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size())
+}
+
+type loggingResponseWriter interface {
+ http.ResponseWriter
+ Status() int
+ Size() int
+}
+
+// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status
+// code and body size
+type responseLogger struct {
+ w http.ResponseWriter
+ status int
+ size int
+}
+
+func (l *responseLogger) Header() http.Header {
+ return l.w.Header()
+}
+
+func (l *responseLogger) Write(b []byte) (int, error) {
+ if l.status == 0 {
+ // The status will be StatusOK if WriteHeader has not been called yet
+ l.status = http.StatusOK
+ }
+ size, err := l.w.Write(b)
+ l.size += size
+ return size, err
+}
+
+func (l *responseLogger) WriteHeader(s int) {
+ l.w.WriteHeader(s)
+ l.status = s
+}
+
+func (l *responseLogger) Status() int {
+ return l.status
+}
+
+func (l *responseLogger) Size() int {
+ return l.size
+}
+
+type hijackLogger struct {
+ responseLogger
+}
+
+func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ h := l.responseLogger.w.(http.Hijacker)
+ conn, rw, err := h.Hijack()
+ if err == nil && l.responseLogger.status == 0 {
+ // The status will be StatusSwitchingProtocols if there was no error and WriteHeader has not been called yet
+ l.responseLogger.status = http.StatusSwitchingProtocols
+ }
+ return conn, rw, err
+}
+
+const lowerhex = "0123456789abcdef"
+
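+// appendQuoted appends s to buf, escaping it in the manner of strconv.Quote
+// but without the surrounding double quotes.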
+func appendQuoted(buf []byte, s string) []byte {
+ var runeTmp [utf8.UTFMax]byte
+ for width := 0; len(s) > 0; s = s[width:] {
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if r == rune('"') || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ continue
+ }
+ if strconv.IsPrint(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ continue
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ':
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ return buf
+}
+
+// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
+ username := "-"
+ if url.User != nil {
+ if name := url.User.Username(); name != "" {
+ username = name
+ }
+ }
+
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+
+ if err != nil {
+ host = req.RemoteAddr
+ }
+
+ uri := url.RequestURI()
+
+ buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
+ buf = append(buf, host...)
+ buf = append(buf, " - "...)
+ buf = append(buf, username...)
+ buf = append(buf, " ["...)
+ buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
+ buf = append(buf, `] "`...)
+ buf = append(buf, req.Method...)
+ buf = append(buf, " "...)
+ buf = appendQuoted(buf, uri)
+ buf = append(buf, " "...)
+ buf = append(buf, req.Proto...)
+ buf = append(buf, `" `...)
+ buf = append(buf, strconv.Itoa(status)...)
+ buf = append(buf, " "...)
+ buf = append(buf, strconv.Itoa(size)...)
+ return buf
+}
+
+// writeLog writes a log entry for req to w in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
+ buf := buildCommonLogLine(req, url, ts, status, size)
+ buf = append(buf, '\n')
+ w.Write(buf)
+}
+
+// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
+ buf := buildCommonLogLine(req, url, ts, status, size)
+ buf = append(buf, ` "`...)
+ buf = appendQuoted(buf, req.Referer())
+ buf = append(buf, `" "`...)
+ buf = appendQuoted(buf, req.UserAgent())
+ buf = append(buf, '"', '\n')
+ w.Write(buf)
+}
+
+// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Combined Log Format.
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
+//
+// CombinedLoggingHandler always sets the ident field of the log to -.
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return combinedLoggingHandler{out, h}
+}
+
+// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Common Log Format (CLF).
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -.
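+//
+// A minimal usage sketch (r is any http.Handler; logging to os.Stdout assumes
+// an "os" import on the caller's side):
+//
+// http.ListenAndServe(":8080", LoggingHandler(os.Stdout, r))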
+func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h}
+}
+
+// isContentType reports whether the Content-Type header matches contentType,
+// comparing only the type and subtype (any parameters are ignored).
+func isContentType(h http.Header, contentType string) bool {
+ ct := h.Get("Content-Type")
+ if i := strings.IndexRune(ct, ';'); i != -1 {
+ ct = ct[0:i]
+ }
+ return ct == contentType
+}
+
+// ContentTypeHandler wraps and returns an http.Handler, validating that the request
+// Content-Type is compatible with the contentTypes list.
+// It writes an HTTP 415 error if that fails.
+//
+// Only PUT, POST, and PATCH requests are considered.
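+//
+// A minimal usage sketch (apiHandler is an illustrative placeholder):
+//
+// h := ContentTypeHandler(apiHandler, "application/json", "application/xml")
+// http.Handle("/api", h)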
+func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ for _, ct := range contentTypes {
+ if isContentType(r.Header, ct) {
+ h.ServeHTTP(w, r)
+ return
+ }
+ }
+ http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType)
+ })
+}
+
+const (
+ // HTTPMethodOverrideHeader is a commonly used
+ // http header to override a request method.
+ HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
+ // HTTPMethodOverrideFormKey is a commonly used
+ // HTML form key to override a request method.
+ HTTPMethodOverrideFormKey = "_method"
+)
+
+// HTTPMethodOverrideHandler wraps and returns an http.Handler which checks for the X-HTTP-Method-Override header
+// or the _method form key, and overrides (if valid) request.Method with its value.
+//
+// This is especially useful for HTTP clients that don't support many HTTP verbs.
+// It isn't secure to override e.g. a GET to a POST, so only POST requests are considered.
+// Likewise, the override method can only be a "write" method: PUT, PATCH or DELETE.
+//
+// Form method takes precedence over header method.
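+//
+// A minimal usage sketch (appHandler is an illustrative placeholder):
+//
+// http.ListenAndServe(":8080", HTTPMethodOverrideHandler(appHandler))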
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "POST" {
+ om := r.FormValue(HTTPMethodOverrideFormKey)
+ if om == "" {
+ om = r.Header.Get(HTTPMethodOverrideHeader)
+ }
+ if om == "PUT" || om == "PATCH" || om == "DELETE" {
+ r.Method = om
+ }
+ }
+ h.ServeHTTP(w, r)
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go
new file mode 100644
index 00000000..94eeb035
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go
@@ -0,0 +1,305 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "bytes"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+)
+
+const (
+ ok = "ok\n"
+ notAllowed = "Method not allowed\n"
+)
+
+var okHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.Write([]byte(ok))
+})
+
+func newRequest(method, url string) *http.Request {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ panic(err)
+ }
+ return req
+}
+
+func TestMethodHandler(t *testing.T) {
+ tests := []struct {
+ req *http.Request
+ handler http.Handler
+ code int
+ allow string // Contents of the Allow header
+ body string
+ }{
+ // No handlers
+ {newRequest("GET", "/foo"), MethodHandler{}, http.StatusMethodNotAllowed, "", notAllowed},
+ {newRequest("OPTIONS", "/foo"), MethodHandler{}, http.StatusOK, "", ""},
+
+ // A single handler
+ {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler}, http.StatusOK, "", ok},
+ {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler}, http.StatusMethodNotAllowed, "GET", notAllowed},
+
+ // Multiple handlers
+ {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok},
+ {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok},
+ {newRequest("DELETE", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusMethodNotAllowed, "GET, POST", notAllowed},
+ {newRequest("OPTIONS", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "GET, POST", ""},
+
+ // Override OPTIONS
+ {newRequest("OPTIONS", "/foo"), MethodHandler{"OPTIONS": okHandler}, http.StatusOK, "", ok},
+ }
+
+ for i, test := range tests {
+ rec := httptest.NewRecorder()
+ test.handler.ServeHTTP(rec, test.req)
+ if rec.Code != test.code {
+ t.Fatalf("%d: wrong code, got %d want %d", i, rec.Code, test.code)
+ }
+ if allow := rec.HeaderMap.Get("Allow"); allow != test.allow {
+ t.Fatalf("%d: wrong Allow, got %s want %s", i, allow, test.allow)
+ }
+ if body := rec.Body.String(); body != test.body {
+ t.Fatalf("%d: wrong body, got %q want %q", i, body, test.body)
+ }
+ }
+}
+
+func TestWriteLog(t *testing.T) {
+ loc, err := time.LoadLocation("Europe/Warsaw")
+ if err != nil {
+ panic(err)
+ }
+ ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
+
+ // A typical request with an OK response
+ req := newRequest("GET", "http://example.com")
+ req.RemoteAddr = "192.168.100.5"
+
+ buf := new(bytes.Buffer)
+ writeLog(buf, req, *req.URL, ts, http.StatusOK, 100)
+ log := buf.String()
+
+ expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+
+ // Request with an unauthorized user
+ req = newRequest("GET", "http://example.com")
+ req.RemoteAddr = "192.168.100.5"
+ req.URL.User = url.User("kamil")
+
+ buf.Reset()
+ writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
+ log = buf.String()
+
+ expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+
+ // Request with url encoded parameters
+ req = newRequest("GET", "http://example.com/test?abc=hello%20world&a=b%3F")
+ req.RemoteAddr = "192.168.100.5"
+
+ buf.Reset()
+ writeLog(buf, req, *req.URL, ts, http.StatusOK, 100)
+ log = buf.String()
+
+ expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET /test?abc=hello%20world&a=b%3F HTTP/1.1\" 200 100\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+}
+
+func TestWriteCombinedLog(t *testing.T) {
+ loc, err := time.LoadLocation("Europe/Warsaw")
+ if err != nil {
+ panic(err)
+ }
+ ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
+
+ // A typical request with an OK response
+ req := newRequest("GET", "http://example.com")
+ req.RemoteAddr = "192.168.100.5"
+ req.Header.Set("Referer", "http://example.com")
+ req.Header.Set(
+ "User-Agent",
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.33 "+
+ "(KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33",
+ )
+
+ buf := new(bytes.Buffer)
+ writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
+ log := buf.String()
+
+ expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
+ "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
+ "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+
+ // Request with an unauthorized user
+ req.URL.User = url.User("kamil")
+
+ buf.Reset()
+ writeCombinedLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
+ log = buf.String()
+
+ expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500 \"http://example.com\" " +
+ "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
+ "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+
+ // Test with remote ipv6 address
+ req.RemoteAddr = "::1"
+
+ buf.Reset()
+ writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
+ log = buf.String()
+
+ expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
+ "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
+ "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+
+ // Test remote ipv6 addr, with port
+ req.RemoteAddr = net.JoinHostPort("::1", "65000")
+
+ buf.Reset()
+ writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
+ log = buf.String()
+
+ expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
+ "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
+ "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
+ if log != expected {
+ t.Fatalf("wrong log, got %q want %q", log, expected)
+ }
+}
+
+func TestLogPathRewrites(t *testing.T) {
+ var buf bytes.Buffer
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ req.URL.Path = "/" // simulate http.StripPrefix and friends
+ w.WriteHeader(200)
+ })
+ logger := LoggingHandler(&buf, handler)
+
+ logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf"))
+
+ if !strings.Contains(buf.String(), "GET /subdir/asdf HTTP") {
+ t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "GET /subdir/asdf HTTP")
+ }
+}
+
+func BenchmarkWriteLog(b *testing.B) {
+ loc, err := time.LoadLocation("Europe/Warsaw")
+ if err != nil {
+ b.Fatal(err)
+ }
+ ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
+
+ req := newRequest("GET", "http://example.com")
+ req.RemoteAddr = "192.168.100.5"
+
+ b.ResetTimer()
+
+ buf := &bytes.Buffer{}
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
+ }
+}
+
+func TestContentTypeHandler(t *testing.T) {
+ tests := []struct {
+ Method string
+ AllowContentTypes []string
+ ContentType string
+ Code int
+ }{
+ {"POST", []string{"application/json"}, "application/json", http.StatusOK},
+ {"POST", []string{"application/json", "application/xml"}, "application/json", http.StatusOK},
+ {"POST", []string{"application/json"}, "application/json; charset=utf-8", http.StatusOK},
+ {"POST", []string{"application/json"}, "application/json+xxx", http.StatusUnsupportedMediaType},
+ {"POST", []string{"application/json"}, "text/plain", http.StatusUnsupportedMediaType},
+ {"GET", []string{"application/json"}, "", http.StatusOK},
+ {"GET", []string{}, "", http.StatusOK},
+ }
+ for _, test := range tests {
+ r, err := http.NewRequest(test.Method, "/", nil)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ h := ContentTypeHandler(okHandler, test.AllowContentTypes...)
+ r.Header.Set("Content-Type", test.ContentType)
+ w := httptest.NewRecorder()
+ h.ServeHTTP(w, r)
+ if w.Code != test.Code {
+ t.Errorf("expected %d, got %d", test.Code, w.Code)
+ }
+ }
+}
+
+func TestHTTPMethodOverride(t *testing.T) {
+ var tests = []struct {
+ Method string
+ OverrideMethod string
+ ExpectedMethod string
+ }{
+ {"POST", "PUT", "PUT"},
+ {"POST", "PATCH", "PATCH"},
+ {"POST", "DELETE", "DELETE"},
+ {"PUT", "DELETE", "PUT"},
+ {"GET", "GET", "GET"},
+ {"HEAD", "HEAD", "HEAD"},
+ {"GET", "PUT", "GET"},
+ {"HEAD", "DELETE", "HEAD"},
+ }
+
+ for _, test := range tests {
+ h := HTTPMethodOverrideHandler(okHandler)
+ reqs := make([]*http.Request, 0, 2)
+
+ rHeader, err := http.NewRequest(test.Method, "/", nil)
+ if err != nil {
+ t.Error(err)
+ }
+ rHeader.Header.Set(HTTPMethodOverrideHeader, test.OverrideMethod)
+ reqs = append(reqs, rHeader)
+
+ f := url.Values{HTTPMethodOverrideFormKey: []string{test.OverrideMethod}}
+ rForm, err := http.NewRequest(test.Method, "/", strings.NewReader(f.Encode()))
+ if err != nil {
+ t.Error(err)
+ }
+ rForm.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ reqs = append(reqs, rForm)
+
+ for _, r := range reqs {
+ w := httptest.NewRecorder()
+ h.ServeHTTP(w, r)
+ if r.Method != test.ExpectedMethod {
+ t.Errorf("Expected %s, got %s", test.ExpectedMethod, r.Method)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml
new file mode 100644
index 00000000..d87d4657
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+ - 1.0
+ - 1.1
+ - 1.2
+ - tip
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE
new file mode 100644
index 00000000..0e5fb872
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md
new file mode 100644
index 00000000..e60301b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/README.md
@@ -0,0 +1,7 @@
+mux
+===
+[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)
+
+gorilla/mux is a powerful URL router and dispatcher.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go
new file mode 100644
index 00000000..c5f97b2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go
@@ -0,0 +1,21 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "net/http"
+ "testing"
+)
+
+func BenchmarkMux(b *testing.B) {
+ router := new(Router)
+ handler := func(w http.ResponseWriter, r *http.Request) {}
+ router.HandleFunc("/v1/{v1}", handler)
+
+ request, _ := http.NewRequest("GET", "/v1/anything", nil)
+ for i := 0; i < b.N; i++ {
+ router.ServeHTTP(nil, request)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
new file mode 100644
index 00000000..b2deed34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gorilla/mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+ * Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ * URL hosts and paths can have variables with an optional regular
+ expression.
+ * Registered URLs can be built, or "reversed", which helps maintain
+ references to resources.
+ * Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ * It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
+ }
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/products/{key}", ProductHandler)
+ r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+ vars := mux.Vars(request)
+ category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+ r := mux.NewRouter()
+ // Only matches if domain is "www.domain.com".
+ r.Host("www.domain.com")
+ // Matches a dynamic subdomain.
+ r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+ r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+ r.Methods("GET", "POST")
+
+...or URL schemes:
+
+ r.Schemes("https")
+
+...or header values:
+
+ r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+ r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+ r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+ return r.ProtoMajor == 0
+ })
+
+...and finally, it is possible to combine several matchers in a single route:
+
+ r.HandleFunc("/products", ProductsHandler).
+ Host("www.domain.com").
+ Methods("GET").
+ Schemes("http")
+
+Setting the same matching conditions again and again can be tedious, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.domain.com". Create a route for that host and get a "subrouter"
+from it:
+
+ r := mux.NewRouter()
+ s := r.Host("www.domain.com").Subrouter()
+
+Then register routes in the subrouter:
+
+ s.HandleFunc("/products/", ProductsHandler)
+ s.HandleFunc("/products/{key}", ProductHandler)
+ s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.domain.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and then different parts of the app can
+register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+ r := mux.NewRouter()
+ s := r.PathPrefix("/products").Subrouter()
+ // "/products/"
+ s.HandleFunc("/", ProductsHandler)
+ // "/products/{key}/"
+ s.HandleFunc("/{key}/", ProductHandler)
+ // "/products/{key}/details"
+ s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+ Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+ url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+ "/articles/technology/42"
+
+This also works for host variables:
+
+ r := mux.NewRouter()
+ r.Host("{subdomain}.domain.com").
+ Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // url.String() will be "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
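+
+A build-only route can be declared with the BuildOnly() method (a minimal
+sketch; the route details are illustrative):
+
+ r.NewRoute().BuildOnly().Name("signup").Host("signup.domain.com")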
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+ // "http://news.domain.com/"
+ host, err := r.Get("article").URLHost("subdomain", "news")
+
+ // "/articles/technology/42"
+ path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+ r := mux.NewRouter()
+ s := r.Host("{subdomain}.domain.com").Subrouter()
+ s.Path("/articles/{category}/{id:[0-9]+}").
+ HandlerFunc(ArticleHandler).
+ Name("article")
+
+ // "http://news.domain.com/articles/technology/42"
+ url, err := r.Get("article").URL("subdomain", "news",
+ "category", "technology",
+ "id", "42")
+*/
+package mux
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
new file mode 100644
index 00000000..5b5f8e7d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go
@@ -0,0 +1,353 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "fmt"
+ "net/http"
+ "path"
+
+ "github.com/gorilla/context"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+ return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+// var router = mux.NewRouter()
+//
+// func main() {
+// http.Handle("/", router)
+// }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+// func init() {
+// http.Handle("/", router)
+// }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+ // Configurable Handler to be used when no route matches.
+ NotFoundHandler http.Handler
+ // Parent route, if this is a subrouter.
+ parent parentRoute
+ // Routes to be matched, in order.
+ routes []*Route
+ // Routes by name for URL building.
+ namedRoutes map[string]*Route
+ // See Router.StrictSlash(). This defines the flag for new routes.
+ strictSlash bool
+ // If true, do not clear the request context after handling the request
+ KeepContext bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+ for _, route := range r.routes {
+ if route.Match(req, match) {
+ return true
+ }
+ }
+ return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ // Clean path to canonical form and redirect.
+ if p := cleanPath(req.URL.Path); p != req.URL.Path {
+
+ // Added 3 lines (Philip Schlump) - it was dropping the query string and fragment from the URL.
+ // This matches the fix in Go 1.2rc4 for the same problem. Go issue:
+ // http://code.google.com/p/go/issues/detail?id=5252
+ url := *req.URL
+ url.Path = p
+ p = url.String()
+
+ w.Header().Set("Location", p)
+ w.WriteHeader(http.StatusMovedPermanently)
+ return
+ }
+ var match RouteMatch
+ var handler http.Handler
+ if r.Match(req, &match) {
+ handler = match.Handler
+ setVars(req, match.Vars)
+ setCurrentRoute(req, match.Route)
+ }
+ if handler == nil {
+ handler = r.NotFoundHandler
+ if handler == nil {
+ handler = http.NotFoundHandler()
+ }
+ }
+ if !r.KeepContext {
+ defer context.Clear(req)
+ }
+ handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+ return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
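+//
+// A minimal sketch (handler is an illustrative placeholder):
+//
+// r := mux.NewRouter().StrictSlash(true)
+// r.HandleFunc("/path/", handler) // a request for "/path" redirects to "/path/"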
+func (r *Router) StrictSlash(value bool) *Router {
+ r.strictSlash = value
+ return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+ if r.namedRoutes == nil {
+ if r.parent != nil {
+ r.namedRoutes = r.parent.getNamedRoutes()
+ } else {
+ r.namedRoutes = make(map[string]*Route)
+ }
+ }
+ return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+ if r.parent != nil {
+ return r.parent.getRegexpGroup()
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+ route := &Route{parent: r, strictSlash: r.strictSlash}
+ r.routes = append(r.routes, route)
+ return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+ return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+ *http.Request)) *Route {
+ return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+ return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+ return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+ return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+ return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+ return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+ return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+ return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+ return r.NewRoute().Schemes(schemes...)
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+ Route *Route
+ Handler http.Handler
+ Vars map[string]string
+}
+
+type contextKey int
+
+const (
+ varsKey contextKey = iota
+ routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+ if rv := context.Get(r, varsKey); rv != nil {
+ return rv.(map[string]string)
+ }
+ return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+func CurrentRoute(r *http.Request) *Route {
+ if rv := context.Get(r, routeKey); rv != nil {
+ return rv.(*Route)
+ }
+ return nil
+}
+
+func setVars(r *http.Request, val interface{}) {
+ context.Set(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) {
+ context.Set(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+ return np
+}
+
+// uniqueVars returns an error if the two slices contain any string in common.
+func uniqueVars(s1, s2 []string) error {
+ for _, v1 := range s1 {
+ for _, v2 := range s2 {
+ if v1 == v2 {
+ return fmt.Errorf("mux: duplicated route variable %q", v2)
+ }
+ }
+ }
+ return nil
+}
+
+// mapFromPairs converts variadic string parameters to a string map.
+func mapFromPairs(pairs ...string) (map[string]string, error) {
+ length := len(pairs)
+ if length%2 != 0 {
+ return nil, fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ }
+ m := make(map[string]string, length/2)
+ for i := 0; i < length; i += 2 {
+ m[pairs[i]] = pairs[i+1]
+ }
+ return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+ for _, v := range arr {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// matchMap returns true if the given key/value pairs exist in a given map.
+func matchMap(toCheck map[string]string, toMatch map[string][]string,
+ canonicalKey bool) bool {
+ for k, v := range toCheck {
+ // Check if key exists.
+ if canonicalKey {
+ k = http.CanonicalHeaderKey(k)
+ }
+ if values := toMatch[k]; values == nil {
+ return false
+ } else if v != "" {
+ // If value was defined as an empty string we only check that the
+ // key exists. Otherwise we also check for equality.
+ valueExists := false
+ for _, value := range values {
+ if v == value {
+ valueExists = true
+ break
+ }
+ }
+ if !valueExists {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go
new file mode 100644
index 00000000..e455bce8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go
@@ -0,0 +1,943 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/gorilla/context"
+)
+
+type routeTest struct {
+ title string // title of the test
+ route *Route // the route being tested
+ request *http.Request // a request to test the route
+ vars map[string]string // the expected vars of the match
+ host string // the expected host of the match
+ path string // the expected path of the match
+ shouldMatch bool // whether the request is expected to match the route at all
+ shouldRedirect bool // whether the request should result in a redirect
+}
+
+func TestHost(t *testing.T) {
+ // newRequestHost creates a new request with a method, url, and host header
+ newRequestHost := func(method, url, host string) *http.Request {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ panic(err)
+ }
+ req.Host = host
+ return req
+ }
+
+ tests := []routeTest{
+ {
+ title: "Host route match",
+ route: new(Route).Host("aaa.bbb.ccc"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host route, wrong host in request URL",
+ route: new(Route).Host("aaa.bbb.ccc"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Host route with port, match",
+ route: new(Route).Host("aaa.bbb.ccc:1234"),
+ request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc:1234",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host route with port, wrong port in request URL",
+ route: new(Route).Host("aaa.bbb.ccc:1234"),
+ request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc:1234",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Host route, match with host in request header",
+ route: new(Route).Host("aaa.bbb.ccc"),
+ request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host route, wrong host in request header",
+ route: new(Route).Host("aaa.bbb.ccc"),
+ request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: false,
+ },
+ // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true},
+ {
+ title: "Host route with port, wrong host in request header",
+ route: new(Route).Host("aaa.bbb.ccc:1234"),
+ request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"),
+ vars: map[string]string{},
+ host: "aaa.bbb.ccc:1234",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Host route with pattern, match",
+ route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{"v1": "bbb"},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host route with pattern, wrong host in request URL",
+ route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{"v1": "bbb"},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Host route with multiple patterns, match",
+ route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host route with multiple patterns, wrong host in request URL",
+ route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"},
+ host: "aaa.bbb.ccc",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestPath(t *testing.T) {
+ tests := []routeTest{
+ {
+ title: "Path route, match",
+ route: new(Route).Path("/111/222/333"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: true,
+ },
+ {
+ title: "Path route, match with trailing slash in request and path",
+ route: new(Route).Path("/111/"),
+ request: newRequest("GET", "http://localhost/111/"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/",
+ shouldMatch: true,
+ },
+ {
+ title: "Path route, do not match with trailing slash in path",
+ route: new(Route).Path("/111/"),
+ request: newRequest("GET", "http://localhost/111"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111",
+ shouldMatch: false,
+ },
+ {
+ title: "Path route, do not match with trailing slash in request",
+ route: new(Route).Path("/111"),
+ request: newRequest("GET", "http://localhost/111/"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/",
+ shouldMatch: false,
+ },
+ {
+ title: "Path route, wrong path in request URL",
+ route: new(Route).Path("/111/222/333"),
+ request: newRequest("GET", "http://localhost/1/2/3"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: false,
+ },
+ {
+ title: "Path route with pattern, match",
+ route: new(Route).Path("/111/{v1:[0-9]{3}}/333"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{"v1": "222"},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: true,
+ },
+ {
+ title: "Path route with pattern, URL in request does not match",
+ route: new(Route).Path("/111/{v1:[0-9]{3}}/333"),
+ request: newRequest("GET", "http://localhost/111/aaa/333"),
+ vars: map[string]string{"v1": "222"},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: false,
+ },
+ {
+ title: "Path route with multiple patterns, match",
+ route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: true,
+ },
+ {
+ title: "Path route with multiple patterns, URL in request does not match",
+ route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/aaa/333"),
+ vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"},
+ host: "",
+ path: "/111/222/333",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestPathPrefix(t *testing.T) {
+ tests := []routeTest{
+ {
+ title: "PathPrefix route, match",
+ route: new(Route).PathPrefix("/111"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111",
+ shouldMatch: true,
+ },
+ {
+ title: "PathPrefix route, match substring",
+ route: new(Route).PathPrefix("/1"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{},
+ host: "",
+ path: "/1",
+ shouldMatch: true,
+ },
+ {
+ title: "PathPrefix route, URL prefix in request does not match",
+ route: new(Route).PathPrefix("/111"),
+ request: newRequest("GET", "http://localhost/1/2/3"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111",
+ shouldMatch: false,
+ },
+ {
+ title: "PathPrefix route with pattern, match",
+ route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{"v1": "222"},
+ host: "",
+ path: "/111/222",
+ shouldMatch: true,
+ },
+ {
+ title: "PathPrefix route with pattern, URL prefix in request does not match",
+ route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/aaa/333"),
+ vars: map[string]string{"v1": "222"},
+ host: "",
+ path: "/111/222",
+ shouldMatch: false,
+ },
+ {
+ title: "PathPrefix route with multiple patterns, match",
+ route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/222/333"),
+ vars: map[string]string{"v1": "111", "v2": "222"},
+ host: "",
+ path: "/111/222",
+ shouldMatch: true,
+ },
+ {
+ title: "PathPrefix route with multiple patterns, URL prefix in request does not match",
+ route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"),
+ request: newRequest("GET", "http://localhost/111/aaa/333"),
+ vars: map[string]string{"v1": "111", "v2": "222"},
+ host: "",
+ path: "/111/222",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestHostPath(t *testing.T) {
+ tests := []routeTest{
+ {
+ title: "Host and Path route, match",
+ route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Host and Path route, wrong host in request URL",
+ route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Host and Path route with pattern, match",
+ route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{"v1": "bbb", "v2": "222"},
+ host: "aaa.bbb.ccc",
+ path: "/111/222/333",
+ shouldMatch: true,
+ },
+ {
+ title: "Host and Path route with pattern, URL in request does not match",
+ route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{"v1": "bbb", "v2": "222"},
+ host: "aaa.bbb.ccc",
+ path: "/111/222/333",
+ shouldMatch: false,
+ },
+ {
+ title: "Host and Path route with multiple patterns, match",
+ route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"),
+ request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"},
+ host: "aaa.bbb.ccc",
+ path: "/111/222/333",
+ shouldMatch: true,
+ },
+ {
+ title: "Host and Path route with multiple patterns, URL in request does not match",
+ route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"),
+ request: newRequest("GET", "http://aaa.222.ccc/111/222/333"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"},
+ host: "aaa.bbb.ccc",
+ path: "/111/222/333",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestHeaders(t *testing.T) {
+ // newRequestHeaders creates a new request with a method, url, and headers
+ newRequestHeaders := func(method, url string, headers map[string]string) *http.Request {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ panic(err)
+ }
+ for k, v := range headers {
+ req.Header.Add(k, v)
+ }
+ return req
+ }
+
+ tests := []routeTest{
+ {
+ title: "Headers route, match",
+ route: new(Route).Headers("foo", "bar", "baz", "ding"),
+ request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Headers route, bad header values",
+ route: new(Route).Headers("foo", "bar", "baz", "ding"),
+ request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+
+}
+
+func TestMethods(t *testing.T) {
+ tests := []routeTest{
+ {
+ title: "Methods route, match GET",
+ route: new(Route).Methods("GET", "POST"),
+ request: newRequest("GET", "http://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Methods route, match POST",
+ route: new(Route).Methods("GET", "POST"),
+ request: newRequest("POST", "http://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Methods route, bad method",
+ route: new(Route).Methods("GET", "POST"),
+ request: newRequest("PUT", "http://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestQueries(t *testing.T) {
+ tests := []routeTest{
+ {
+ title: "Queries route, match",
+ route: new(Route).Queries("foo", "bar", "baz", "ding"),
+ request: newRequest("GET", "http://localhost?foo=bar&baz=ding"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route, match with a query string",
+ route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"),
+ request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route, match with a query string out of order",
+ route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"),
+ request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route, bad query",
+ route: new(Route).Queries("foo", "bar", "baz", "ding"),
+ request: newRequest("GET", "http://localhost?foo=bar&baz=dong"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ {
+ title: "Queries route with pattern, match",
+ route: new(Route).Queries("foo", "{v1}"),
+ request: newRequest("GET", "http://localhost?foo=bar"),
+ vars: map[string]string{"v1": "bar"},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route with multiple patterns, match",
+ route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"),
+ request: newRequest("GET", "http://localhost?foo=bar&baz=ding"),
+ vars: map[string]string{"v1": "bar", "v2": "ding"},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route with regexp pattern, match",
+ route: new(Route).Queries("foo", "{v1:[0-9]+}"),
+ request: newRequest("GET", "http://localhost?foo=10"),
+ vars: map[string]string{"v1": "10"},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Queries route with regexp pattern, regexp does not match",
+ route: new(Route).Queries("foo", "{v1:[0-9]+}"),
+ request: newRequest("GET", "http://localhost?foo=a"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestSchemes(t *testing.T) {
+ tests := []routeTest{
+ // Schemes
+ {
+ title: "Schemes route, match https",
+ route: new(Route).Schemes("https", "ftp"),
+ request: newRequest("GET", "https://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Schemes route, match ftp",
+ route: new(Route).Schemes("https", "ftp"),
+ request: newRequest("GET", "ftp://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "Schemes route, bad scheme",
+ route: new(Route).Schemes("https", "ftp"),
+ request: newRequest("GET", "http://localhost"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestMatcherFunc(t *testing.T) {
+ m := func(r *http.Request, m *RouteMatch) bool {
+ if r.URL.Host == "aaa.bbb.ccc" {
+ return true
+ }
+ return false
+ }
+
+ tests := []routeTest{
+ {
+ title: "MatchFunc route, match",
+ route: new(Route).MatcherFunc(m),
+ request: newRequest("GET", "http://aaa.bbb.ccc"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: true,
+ },
+ {
+ title: "MatchFunc route, non-match",
+ route: new(Route).MatcherFunc(m),
+ request: newRequest("GET", "http://aaa.222.ccc"),
+ vars: map[string]string{},
+ host: "",
+ path: "",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestSubRouter(t *testing.T) {
+ subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter()
+ subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter()
+
+ tests := []routeTest{
+ {
+ route: subrouter1.Path("/{v2:[a-z]+}"),
+ request: newRequest("GET", "http://aaa.google.com/bbb"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb"},
+ host: "aaa.google.com",
+ path: "/bbb",
+ shouldMatch: true,
+ },
+ {
+ route: subrouter1.Path("/{v2:[a-z]+}"),
+ request: newRequest("GET", "http://111.google.com/111"),
+ vars: map[string]string{"v1": "aaa", "v2": "bbb"},
+ host: "aaa.google.com",
+ path: "/bbb",
+ shouldMatch: false,
+ },
+ {
+ route: subrouter2.Path("/baz/{v2}"),
+ request: newRequest("GET", "http://localhost/foo/bar/baz/ding"),
+ vars: map[string]string{"v1": "bar", "v2": "ding"},
+ host: "",
+ path: "/foo/bar/baz/ding",
+ shouldMatch: true,
+ },
+ {
+ route: subrouter2.Path("/baz/{v2}"),
+ request: newRequest("GET", "http://localhost/foo/bar"),
+ vars: map[string]string{"v1": "bar", "v2": "ding"},
+ host: "",
+ path: "/foo/bar/baz/ding",
+ shouldMatch: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+func TestNamedRoutes(t *testing.T) {
+ r1 := NewRouter()
+ r1.NewRoute().Name("a")
+ r1.NewRoute().Name("b")
+ r1.NewRoute().Name("c")
+
+ r2 := r1.NewRoute().Subrouter()
+ r2.NewRoute().Name("d")
+ r2.NewRoute().Name("e")
+ r2.NewRoute().Name("f")
+
+ r3 := r2.NewRoute().Subrouter()
+ r3.NewRoute().Name("g")
+ r3.NewRoute().Name("h")
+ r3.NewRoute().Name("i")
+
+ if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 {
+ t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes)
+ } else if r1.Get("i") == nil {
+ t.Errorf("Subroute name not registered")
+ }
+}
+
+func TestStrictSlash(t *testing.T) {
+ r := NewRouter()
+ r.StrictSlash(true)
+
+ tests := []routeTest{
+ {
+ title: "Redirect path without slash",
+ route: r.NewRoute().Path("/111/"),
+ request: newRequest("GET", "http://localhost/111"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/",
+ shouldMatch: true,
+ shouldRedirect: true,
+ },
+ {
+ title: "Do not redirect path with slash",
+ route: r.NewRoute().Path("/111/"),
+ request: newRequest("GET", "http://localhost/111/"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111/",
+ shouldMatch: true,
+ shouldRedirect: false,
+ },
+ {
+ title: "Redirect path with slash",
+ route: r.NewRoute().Path("/111"),
+ request: newRequest("GET", "http://localhost/111/"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111",
+ shouldMatch: true,
+ shouldRedirect: true,
+ },
+ {
+ title: "Do not redirect path without slash",
+ route: r.NewRoute().Path("/111"),
+ request: newRequest("GET", "http://localhost/111"),
+ vars: map[string]string{},
+ host: "",
+ path: "/111",
+ shouldMatch: true,
+ shouldRedirect: false,
+ },
+ {
+ title: "Propagate StrictSlash to subrouters",
+ route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"),
+ request: newRequest("GET", "http://localhost/static/images"),
+ vars: map[string]string{},
+ host: "",
+ path: "/static/images/",
+ shouldMatch: true,
+ shouldRedirect: true,
+ },
+ {
+ title: "Ignore StrictSlash for path prefix",
+ route: r.NewRoute().PathPrefix("/static/"),
+ request: newRequest("GET", "http://localhost/static/logo.png"),
+ vars: map[string]string{},
+ host: "",
+ path: "/static/",
+ shouldMatch: true,
+ shouldRedirect: false,
+ },
+ }
+
+ for _, test := range tests {
+ testRoute(t, test)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+func getRouteTemplate(route *Route) string {
+ host, path := "none", "none"
+ if route.regexp != nil {
+ if route.regexp.host != nil {
+ host = route.regexp.host.template
+ }
+ if route.regexp.path != nil {
+ path = route.regexp.path.template
+ }
+ }
+ return fmt.Sprintf("Host: %v, Path: %v", host, path)
+}
+
+func testRoute(t *testing.T, test routeTest) {
+ request := test.request
+ route := test.route
+ vars := test.vars
+ shouldMatch := test.shouldMatch
+ host := test.host
+ path := test.path
+ url := test.host + test.path
+ shouldRedirect := test.shouldRedirect
+
+ var match RouteMatch
+ ok := route.Match(request, &match)
+ if ok != shouldMatch {
+ msg := "Should match"
+ if !shouldMatch {
+ msg = "Should not match"
+ }
+ t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars)
+ return
+ }
+ if shouldMatch {
+ if test.vars != nil && !stringMapEqual(test.vars, match.Vars) {
+ t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars)
+ return
+ }
+ if host != "" {
+ u, _ := test.route.URLHost(mapToPairs(match.Vars)...)
+ if host != u.Host {
+ t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route))
+ return
+ }
+ }
+ if path != "" {
+ u, _ := route.URLPath(mapToPairs(match.Vars)...)
+ if path != u.Path {
+ t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route))
+ return
+ }
+ }
+ if url != "" {
+ u, _ := route.URL(mapToPairs(match.Vars)...)
+ if url != u.Host+u.Path {
+ t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route))
+ return
+ }
+ }
+ if shouldRedirect && match.Handler == nil {
+ t.Errorf("(%v) Did not redirect", test.title)
+ return
+ }
+ if !shouldRedirect && match.Handler != nil {
+ t.Errorf("(%v) Unexpected redirect", test.title)
+ return
+ }
+ }
+}
+
+// Tests that the context is cleared or not cleared properly depending on
+// the configuration of the router
+func TestKeepContext(t *testing.T) {
+ func1 := func(w http.ResponseWriter, r *http.Request) {}
+
+ r := NewRouter()
+ r.HandleFunc("/", func1).Name("func1")
+
+ req, _ := http.NewRequest("GET", "http://localhost/", nil)
+ context.Set(req, "t", 1)
+
+ res := new(http.ResponseWriter)
+ r.ServeHTTP(*res, req)
+
+ if _, ok := context.GetOk(req, "t"); ok {
+ t.Error("Context should have been cleared at end of request")
+ }
+
+ r.KeepContext = true
+
+ req, _ = http.NewRequest("GET", "http://localhost/", nil)
+ context.Set(req, "t", 1)
+
+ r.ServeHTTP(*res, req)
+ if _, ok := context.GetOk(req, "t"); !ok {
+ t.Error("Context should NOT have been cleared at end of request")
+ }
+
+}
+
+type TestA301ResponseWriter struct {
+ hh http.Header
+ status int
+}
+
+func (ho TestA301ResponseWriter) Header() http.Header {
+ return http.Header(ho.hh)
+}
+
+func (ho TestA301ResponseWriter) Write(b []byte) (int, error) {
+ return 0, nil
+}
+
+func (ho TestA301ResponseWriter) WriteHeader(code int) {
+ ho.status = code
+}
+
+func Test301Redirect(t *testing.T) {
+ m := make(http.Header)
+
+ func1 := func(w http.ResponseWriter, r *http.Request) {}
+ func2 := func(w http.ResponseWriter, r *http.Request) {}
+
+ r := NewRouter()
+ r.HandleFunc("/api/", func2).Name("func2")
+ r.HandleFunc("/", func1).Name("func1")
+
+ req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil)
+
+ res := TestA301ResponseWriter{
+ hh: m,
+ status: 0,
+ }
+ r.ServeHTTP(&res, req)
+
+ if "http://localhost/api/?abc=def" != res.hh["Location"][0] {
+ t.Errorf("Should have complete URL with query string")
+ }
+}
+
+// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW
+func TestSubrouterHeader(t *testing.T) {
+ expected := "func1 response"
+ func1 := func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprint(w, expected)
+ }
+ func2 := func(http.ResponseWriter, *http.Request) {}
+
+ r := NewRouter()
+ s := r.Headers("SomeSpecialHeader", "").Subrouter()
+ s.HandleFunc("/", func1).Name("func1")
+ r.HandleFunc("/", func2).Name("func2")
+
+ req, _ := http.NewRequest("GET", "http://localhost/", nil)
+ req.Header.Add("SomeSpecialHeader", "foo")
+ match := new(RouteMatch)
+ matched := r.Match(req, match)
+ if !matched {
+ t.Errorf("Should match request")
+ }
+ if match.Route.GetName() != "func1" {
+ t.Errorf("Expecting func1 handler, got %s", match.Route.GetName())
+ }
+ resp := NewRecorder()
+ match.Handler.ServeHTTP(resp, req)
+ if resp.Body.String() != expected {
+ t.Errorf("Expecting %q", expected)
+ }
+}
+
+// mapToPairs converts a string map to a slice of string pairs
+func mapToPairs(m map[string]string) []string {
+ var i int
+ p := make([]string, len(m)*2)
+ for k, v := range m {
+ p[i] = k
+ p[i+1] = v
+ i += 2
+ }
+ return p
+}
+
+// stringMapEqual checks the equality of two string maps
+func stringMapEqual(m1, m2 map[string]string) bool {
+ nil1 := m1 == nil
+ nil2 := m2 == nil
+ if nil1 != nil2 || len(m1) != len(m2) {
+ return false
+ }
+ for k, v := range m1 {
+ if v != m2[k] {
+ return false
+ }
+ }
+ return true
+}
+
+// newRequest is a helper function to create a new request with a method and url
+func newRequest(method, url string) *http.Request {
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ panic(err)
+ }
+ return req
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go
new file mode 100644
index 00000000..1f7c190c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go
@@ -0,0 +1,714 @@
+// Old tests ported to Go 1. This is a mess; we want to drop it one day.
+
+// Copyright 2011 Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "net/http"
+ "testing"
+)
+
+// ----------------------------------------------------------------------------
+// ResponseRecorder
+// ----------------------------------------------------------------------------
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ResponseRecorder is an implementation of http.ResponseWriter that
+// records its mutations for later inspection in tests.
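+//
+// For example (an illustrative sketch; handler and req stand for any
+// http.Handler and *http.Request under test):
+//
+//     rec := NewRecorder()
+//     handler.ServeHTTP(rec, req)
+//     // rec.Code, rec.HeaderMap and rec.Body now hold the recorded response.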
+type ResponseRecorder struct {
+ Code int // the HTTP response code from WriteHeader
+ HeaderMap http.Header // the HTTP response headers
+ Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
+ Flushed bool
+}
+
+// NewRecorder returns an initialized ResponseRecorder.
+func NewRecorder() *ResponseRecorder {
+ return &ResponseRecorder{
+ HeaderMap: make(http.Header),
+ Body: new(bytes.Buffer),
+ }
+}
+
+// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
+// an explicit DefaultRemoteAddr isn't set on ResponseRecorder.
+const DefaultRemoteAddr = "1.2.3.4"
+
+// Header returns the response headers.
+func (rw *ResponseRecorder) Header() http.Header {
+ return rw.HeaderMap
+}
+
+// Write always succeeds and writes to rw.Body, if not nil.
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
+ if rw.Body != nil {
+ rw.Body.Write(buf)
+ }
+ if rw.Code == 0 {
+ rw.Code = http.StatusOK
+ }
+ return len(buf), nil
+}
+
+// WriteHeader sets rw.Code.
+func (rw *ResponseRecorder) WriteHeader(code int) {
+ rw.Code = code
+}
+
+// Flush sets rw.Flushed to true.
+func (rw *ResponseRecorder) Flush() {
+ rw.Flushed = true
+}
+
+// ----------------------------------------------------------------------------
+
+func TestRouteMatchers(t *testing.T) {
+ var scheme, host, path, query, method string
+ var headers map[string]string
+ var resultVars map[bool]map[string]string
+
+ router := NewRouter()
+ router.NewRoute().Host("{var1}.google.com").
+ Path("/{var2:[a-z]+}/{var3:[0-9]+}").
+ Queries("foo", "bar").
+ Methods("GET").
+ Schemes("https").
+ Headers("x-requested-with", "XMLHttpRequest")
+ router.NewRoute().Host("www.{var4}.com").
+ PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}").
+ Queries("baz", "ding").
+ Methods("POST").
+ Schemes("http").
+ Headers("Content-Type", "application/json")
+
+ reset := func() {
+ // Everything matches.
+ scheme = "https"
+ host = "www.google.com"
+ path = "/product/42"
+ query = "?foo=bar"
+ method = "GET"
+ headers = map[string]string{"X-Requested-With": "XMLHttpRequest"}
+ resultVars = map[bool]map[string]string{
+ true: {"var1": "www", "var2": "product", "var3": "42"},
+ false: {},
+ }
+ }
+
+ reset2 := func() {
+ // Everything matches.
+ scheme = "http"
+ host = "www.google.com"
+ path = "/foo/product/42/path/that/is/ignored"
+ query = "?baz=ding"
+ method = "POST"
+ headers = map[string]string{"Content-Type": "application/json"}
+ resultVars = map[bool]map[string]string{
+ true: {"var4": "google", "var5": "product", "var6": "42"},
+ false: {},
+ }
+ }
+
+ match := func(shouldMatch bool) {
+ url := scheme + "://" + host + path + query
+ request, _ := http.NewRequest(method, url, nil)
+ for key, value := range headers {
+ request.Header.Add(key, value)
+ }
+
+ var routeMatch RouteMatch
+ matched := router.Match(request, &routeMatch)
+ if matched != shouldMatch {
+ // Need better messages. :)
+ if matched {
+ t.Errorf("Should match.")
+ } else {
+ t.Errorf("Should not match.")
+ }
+ }
+
+ if matched {
+ currentRoute := routeMatch.Route
+ if currentRoute == nil {
+ t.Errorf("Expected a current route.")
+ }
+ vars := routeMatch.Vars
+ expectedVars := resultVars[shouldMatch]
+ if len(vars) != len(expectedVars) {
+ t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars)
+ }
+ for name, value := range vars {
+ if expectedVars[name] != value {
+ t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars)
+ }
+ }
+ }
+ }
+
+ // 1st route --------------------------------------------------------------
+
+ // Everything matches.
+ reset()
+ match(true)
+
+ // Scheme doesn't match.
+ reset()
+ scheme = "http"
+ match(false)
+
+ // Host doesn't match.
+ reset()
+ host = "www.mygoogle.com"
+ match(false)
+
+ // Path doesn't match.
+ reset()
+ path = "/product/notdigits"
+ match(false)
+
+ // Query doesn't match.
+ reset()
+ query = "?foo=baz"
+ match(false)
+
+ // Method doesn't match.
+ reset()
+ method = "POST"
+ match(false)
+
+ // Header doesn't match.
+ reset()
+ headers = map[string]string{}
+ match(false)
+
+ // Everything matches, again.
+ reset()
+ match(true)
+
+ // 2nd route --------------------------------------------------------------
+
+ // Everything matches.
+ reset2()
+ match(true)
+
+ // Scheme doesn't match.
+ reset2()
+ scheme = "https"
+ match(false)
+
+ // Host doesn't match.
+ reset2()
+ host = "sub.google.com"
+ match(false)
+
+ // Path doesn't match.
+ reset2()
+ path = "/bar/product/42"
+ match(false)
+
+ // Query doesn't match.
+ reset2()
+ query = "?foo=baz"
+ match(false)
+
+ // Method doesn't match.
+ reset2()
+ method = "GET"
+ match(false)
+
+ // Header doesn't match.
+ reset2()
+ headers = map[string]string{}
+ match(false)
+
+ // Everything matches, again.
+ reset2()
+ match(true)
+}
+
+type headerMatcherTest struct {
+ matcher headerMatcher
+ headers map[string]string
+ result bool
+}
+
+var headerMatcherTests = []headerMatcherTest{
+ {
+ matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}),
+ headers: map[string]string{"X-Requested-With": "XMLHttpRequest"},
+ result: true,
+ },
+ {
+ matcher: headerMatcher(map[string]string{"x-requested-with": ""}),
+ headers: map[string]string{"X-Requested-With": "anything"},
+ result: true,
+ },
+ {
+ matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}),
+ headers: map[string]string{},
+ result: false,
+ },
+}
+
+type hostMatcherTest struct {
+ matcher *Route
+ url string
+ vars map[string]string
+ result bool
+}
+
+var hostMatcherTests = []hostMatcherTest{
+ {
+ matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"),
+ url: "http://abc.def.ghi/",
+ vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"},
+ result: true,
+ },
+ {
+ matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"),
+ url: "http://a.b.c/",
+ vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"},
+ result: false,
+ },
+}
+
+type methodMatcherTest struct {
+ matcher methodMatcher
+ method string
+ result bool
+}
+
+var methodMatcherTests = []methodMatcherTest{
+ {
+ matcher: methodMatcher([]string{"GET", "POST", "PUT"}),
+ method: "GET",
+ result: true,
+ },
+ {
+ matcher: methodMatcher([]string{"GET", "POST", "PUT"}),
+ method: "POST",
+ result: true,
+ },
+ {
+ matcher: methodMatcher([]string{"GET", "POST", "PUT"}),
+ method: "PUT",
+ result: true,
+ },
+ {
+ matcher: methodMatcher([]string{"GET", "POST", "PUT"}),
+ method: "DELETE",
+ result: false,
+ },
+}
+
+type pathMatcherTest struct {
+ matcher *Route
+ url string
+ vars map[string]string
+ result bool
+}
+
+var pathMatcherTests = []pathMatcherTest{
+ {
+ matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"),
+ url: "http://localhost:8080/123/456/789",
+ vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"},
+ result: true,
+ },
+ {
+ matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"),
+ url: "http://localhost:8080/1/2/3",
+ vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"},
+ result: false,
+ },
+}
+
+type schemeMatcherTest struct {
+ matcher schemeMatcher
+ url string
+ result bool
+}
+
+var schemeMatcherTests = []schemeMatcherTest{
+ {
+ matcher: schemeMatcher([]string{"http", "https"}),
+ url: "http://localhost:8080/",
+ result: true,
+ },
+ {
+ matcher: schemeMatcher([]string{"http", "https"}),
+ url: "https://localhost:8080/",
+ result: true,
+ },
+ {
+ matcher: schemeMatcher([]string{"https"}),
+ url: "http://localhost:8080/",
+ result: false,
+ },
+ {
+ matcher: schemeMatcher([]string{"http"}),
+ url: "https://localhost:8080/",
+ result: false,
+ },
+}
+
+type urlBuildingTest struct {
+ route *Route
+ vars []string
+ url string
+}
+
+var urlBuildingTests = []urlBuildingTest{
+ {
+ route: new(Route).Host("foo.domain.com"),
+ vars: []string{},
+ url: "http://foo.domain.com",
+ },
+ {
+ route: new(Route).Host("{subdomain}.domain.com"),
+ vars: []string{"subdomain", "bar"},
+ url: "http://bar.domain.com",
+ },
+ {
+ route: new(Route).Host("foo.domain.com").Path("/articles"),
+ vars: []string{},
+ url: "http://foo.domain.com/articles",
+ },
+ {
+ route: new(Route).Path("/articles"),
+ vars: []string{},
+ url: "/articles",
+ },
+ {
+ route: new(Route).Path("/articles/{category}/{id:[0-9]+}"),
+ vars: []string{"category", "technology", "id", "42"},
+ url: "/articles/technology/42",
+ },
+ {
+ route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"),
+ vars: []string{"subdomain", "foo", "category", "technology", "id", "42"},
+ url: "http://foo.domain.com/articles/technology/42",
+ },
+}
+
+func TestHeaderMatcher(t *testing.T) {
+ for _, v := range headerMatcherTests {
+ request, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
+ for key, value := range v.headers {
+ request.Header.Add(key, value)
+ }
+ var routeMatch RouteMatch
+ result := v.matcher.Match(request, &routeMatch)
+ if result != v.result {
+ if v.result {
+ t.Errorf("%#v: should match %v.", v.matcher, request.Header)
+ } else {
+ t.Errorf("%#v: should not match %v.", v.matcher, request.Header)
+ }
+ }
+ }
+}
+
+func TestHostMatcher(t *testing.T) {
+ for _, v := range hostMatcherTests {
+ request, _ := http.NewRequest("GET", v.url, nil)
+ var routeMatch RouteMatch
+ result := v.matcher.Match(request, &routeMatch)
+ vars := routeMatch.Vars
+ if result != v.result {
+ if v.result {
+ t.Errorf("%#v: should match %v.", v.matcher, v.url)
+ } else {
+ t.Errorf("%#v: should not match %v.", v.matcher, v.url)
+ }
+ }
+ if result {
+ if len(vars) != len(v.vars) {
+ t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars))
+ }
+ for name, value := range vars {
+ if v.vars[name] != value {
+ t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value)
+ }
+ }
+ } else {
+ if len(vars) != 0 {
+ t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars))
+ }
+ }
+ }
+}
+
+func TestMethodMatcher(t *testing.T) {
+ for _, v := range methodMatcherTests {
+ request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil)
+ var routeMatch RouteMatch
+ result := v.matcher.Match(request, &routeMatch)
+ if result != v.result {
+ if v.result {
+ t.Errorf("%#v: should match %v.", v.matcher, v.method)
+ } else {
+ t.Errorf("%#v: should not match %v.", v.matcher, v.method)
+ }
+ }
+ }
+}
+
+func TestPathMatcher(t *testing.T) {
+ for _, v := range pathMatcherTests {
+ request, _ := http.NewRequest("GET", v.url, nil)
+ var routeMatch RouteMatch
+ result := v.matcher.Match(request, &routeMatch)
+ vars := routeMatch.Vars
+ if result != v.result {
+ if v.result {
+ t.Errorf("%#v: should match %v.", v.matcher, v.url)
+ } else {
+ t.Errorf("%#v: should not match %v.", v.matcher, v.url)
+ }
+ }
+ if result {
+ if len(vars) != len(v.vars) {
+ t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars))
+ }
+ for name, value := range vars {
+ if v.vars[name] != value {
+ t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value)
+ }
+ }
+ } else {
+ if len(vars) != 0 {
+ t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars))
+ }
+ }
+ }
+}
+
+func TestSchemeMatcher(t *testing.T) {
+ for _, v := range schemeMatcherTests {
+ request, _ := http.NewRequest("GET", v.url, nil)
+ var routeMatch RouteMatch
+ result := v.matcher.Match(request, &routeMatch)
+ if result != v.result {
+ if v.result {
+ t.Errorf("%#v: should match %v.", v.matcher, v.url)
+ } else {
+ t.Errorf("%#v: should not match %v.", v.matcher, v.url)
+ }
+ }
+ }
+}
+
+func TestUrlBuilding(t *testing.T) {
+
+ for _, v := range urlBuildingTests {
+ u, _ := v.route.URL(v.vars...)
+ url := u.String()
+ if url != v.url {
+ t.Errorf("expected %v, got %v", v.url, url)
+ /*
+ reversePath := ""
+ reverseHost := ""
+ if v.route.pathTemplate != nil {
+ reversePath = v.route.pathTemplate.Reverse
+ }
+ if v.route.hostTemplate != nil {
+ reverseHost = v.route.hostTemplate.Reverse
+ }
+
+ t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost)
+ */
+ }
+ }
+
+ ArticleHandler := func(w http.ResponseWriter, r *http.Request) {
+ }
+
+ router := NewRouter()
+ router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article")
+
+ url, _ := router.Get("article").URL("category", "technology", "id", "42")
+ expected := "/articles/technology/42"
+ if url.String() != expected {
+ t.Errorf("Expected %v, got %v", expected, url.String())
+ }
+}
+
+func TestMatchedRouteName(t *testing.T) {
+ routeName := "stock"
+ router := NewRouter()
+ route := router.NewRoute().Path("/products/").Name(routeName)
+
+ url := "http://www.domain.com/products/"
+ request, _ := http.NewRequest("GET", url, nil)
+ var rv RouteMatch
+ ok := router.Match(request, &rv)
+
+ if !ok || rv.Route != route {
+ t.Errorf("Expected same route, got %+v.", rv.Route)
+ }
+
+ retName := rv.Route.GetName()
+ if retName != routeName {
+ t.Errorf("Expected %q, got %q.", routeName, retName)
+ }
+}
+
+func TestSubRouting(t *testing.T) {
+ // Example from docs.
+ router := NewRouter()
+ subrouter := router.NewRoute().Host("www.domain.com").Subrouter()
+ route := subrouter.NewRoute().Path("/products/").Name("products")
+
+ url := "http://www.domain.com/products/"
+ request, _ := http.NewRequest("GET", url, nil)
+ var rv RouteMatch
+ ok := router.Match(request, &rv)
+
+ if !ok || rv.Route != route {
+ t.Errorf("Expected same route, got %+v.", rv.Route)
+ }
+
+ u, _ := router.Get("products").URL()
+ builtUrl := u.String()
+ // Yay, the subroute is aware of the domain when building!
+ if builtUrl != url {
+ t.Errorf("Expected %q, got %q.", url, builtUrl)
+ }
+}
+
+func TestVariableNames(t *testing.T) {
+ route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}")
+ if route.err == nil {
+ t.Errorf("Expected error for duplicated variable names")
+ }
+}
+
+func TestRedirectSlash(t *testing.T) {
+ var route *Route
+ var routeMatch RouteMatch
+ r := NewRouter()
+
+ r.StrictSlash(false)
+ route = r.NewRoute()
+ if route.strictSlash != false {
+ t.Errorf("Expected false redirectSlash.")
+ }
+
+ r.StrictSlash(true)
+ route = r.NewRoute()
+ if route.strictSlash != true {
+ t.Errorf("Expected true redirectSlash.")
+ }
+
+ route = new(Route)
+ route.strictSlash = true
+ route.Path("/{arg1}/{arg2:[0-9]+}/")
+ request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil)
+ routeMatch = RouteMatch{}
+ _ = route.Match(request, &routeMatch)
+ vars := routeMatch.Vars
+ if vars["arg1"] != "foo" {
+ t.Errorf("Expected foo.")
+ }
+ if vars["arg2"] != "123" {
+ t.Errorf("Expected 123.")
+ }
+ rsp := NewRecorder()
+ routeMatch.Handler.ServeHTTP(rsp, request)
+ if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" {
+ t.Errorf("Expected redirect header.")
+ }
+
+ route = new(Route)
+ route.strictSlash = true
+ route.Path("/{arg1}/{arg2:[0-9]+}")
+ request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil)
+ routeMatch = RouteMatch{}
+ _ = route.Match(request, &routeMatch)
+ vars = routeMatch.Vars
+ if vars["arg1"] != "foo" {
+ t.Errorf("Expected foo.")
+ }
+ if vars["arg2"] != "123" {
+ t.Errorf("Expected 123.")
+ }
+ rsp = NewRecorder()
+ routeMatch.Handler.ServeHTTP(rsp, request)
+ if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" {
+ t.Errorf("Expected redirect header.")
+ }
+}
+
+// Test for the new regexp library, still not available in stable Go.
+func TestNewRegexp(t *testing.T) {
+ var p *routeRegexp
+ var matches []string
+
+ tests := map[string]map[string][]string{
+ "/{foo:a{2}}": {
+ "/a": nil,
+ "/aa": {"aa"},
+ "/aaa": nil,
+ "/aaaa": nil,
+ },
+ "/{foo:a{2,}}": {
+ "/a": nil,
+ "/aa": {"aa"},
+ "/aaa": {"aaa"},
+ "/aaaa": {"aaaa"},
+ },
+ "/{foo:a{2,3}}": {
+ "/a": nil,
+ "/aa": {"aa"},
+ "/aaa": {"aaa"},
+ "/aaaa": nil,
+ },
+ "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": {
+ "/a": nil,
+ "/ab": nil,
+ "/abc": nil,
+ "/abcd": nil,
+ "/abc/ab": {"abc", "ab"},
+ "/abc/abc": nil,
+ "/abcd/ab": nil,
+ },
+ `/{foo:\w{3,}}/{bar:\d{2,}}`: {
+ "/a": nil,
+ "/ab": nil,
+ "/abc": nil,
+ "/abc/1": nil,
+ "/abc/12": {"abc", "12"},
+ "/abcd/12": {"abcd", "12"},
+ "/abcd/123": {"abcd", "123"},
+ },
+ }
+
+ for pattern, paths := range tests {
+ p, _ = newRouteRegexp(pattern, false, false, false, false)
+ for path, result := range paths {
+ matches = p.regexp.FindStringSubmatch(path)
+ if result == nil {
+ if matches != nil {
+ t.Errorf("%v should not match %v.", pattern, path)
+ }
+ } else {
+ if len(matches) != len(result)+1 {
+ t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches))
+ } else {
+ for k, v := range result {
+ if matches[k+1] != v {
+ t.Errorf("Expected %v, got %v.", v, matches[k+1])
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
new file mode 100644
index 00000000..a6305483
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,276 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
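+//
+// For illustration (a sketch derived from the parsing code below, not part
+// of the upstream documentation): the path template
+// "/articles/{category}/{id:[0-9]+}" produces the match regexp
+// "^/articles/([^/]+)/([0-9]+)$", the reverse template "/articles/%s/%s",
+// and the variable names ["category", "id"].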
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := braceIndices(tpl)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ // Backup the original.
+ template := tpl
+ // Now let's parse it.
+ defaultPattern := "[^/]+"
+ if matchQuery {
+ defaultPattern = "[^?&]+"
+ matchPrefix = true
+ } else if matchHost {
+ defaultPattern = "[^.]+"
+ matchPrefix = false
+ }
+ // strictSlash only applies to plain path matches; disable it for prefix, host, and query matches.
+ if matchPrefix || matchHost || matchQuery {
+ strictSlash = false
+ }
+ // Set a flag for strictSlash.
+ endSlash := false
+ if strictSlash && strings.HasSuffix(tpl, "/") {
+ tpl = tpl[:len(tpl)-1]
+ endSlash = true
+ }
+ varsN := make([]string, len(idxs)/2)
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+ if !matchQuery {
+ pattern.WriteByte('^')
+ }
+ reverse := bytes.NewBufferString("")
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+ name := parts[0]
+ patt := defaultPattern
+ if len(parts) == 2 {
+ patt = parts[1]
+ }
+ // Name or pattern can't be empty.
+ if name == "" || patt == "" {
+ return nil, fmt.Errorf("mux: missing name or pattern in %q",
+ tpl[idxs[i]:end])
+ }
+ // Build the regexp pattern.
+ fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt)
+ // Build the reverse template.
+ fmt.Fprintf(reverse, "%s%%s", raw)
+ // Append variable name and compiled pattern.
+ varsN[i/2] = name
+ varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Add the remaining.
+ raw := tpl[end:]
+ pattern.WriteString(regexp.QuoteMeta(raw))
+ if strictSlash {
+ pattern.WriteString("[/]?")
+ }
+ if !matchPrefix {
+ pattern.WriteByte('$')
+ }
+ reverse.WriteString(raw)
+ if endSlash {
+ reverse.WriteByte('/')
+ }
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+ // Done!
+ return &routeRegexp{
+ template: template,
+ matchHost: matchHost,
+ matchQuery: matchQuery,
+ strictSlash: strictSlash,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
+ }, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+ // The unmodified template.
+ template string
+ // True for host match, false for path or query string match.
+ matchHost bool
+ // True for query string match, false for path and host match.
+ matchQuery bool
+ // The strictSlash value defined on the route, but disabled if PathPrefix was used.
+ strictSlash bool
+ // Expanded regexp.
+ regexp *regexp.Regexp
+ // Reverse template.
+ reverse string
+ // Variable names.
+ varsN []string
+ // Variable regexps (validators).
+ varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host, path, or query string.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+ if !r.matchHost {
+ if r.matchQuery {
+ return r.regexp.MatchString(req.URL.RawQuery)
+ } else {
+ return r.regexp.MatchString(req.URL.Path)
+ }
+ }
+ return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(pairs ...string) (string, error) {
+ values, err := mapFromPairs(pairs...)
+ if err != nil {
+ return "", err
+ }
+ urlValues := make([]interface{}, len(r.varsN))
+ for k, v := range r.varsN {
+ value, ok := values[v]
+ if !ok {
+ return "", fmt.Errorf("mux: missing route variable %q", v)
+ }
+ urlValues[k] = value
+ }
+ rv := fmt.Sprintf(r.reverse, urlValues...)
+ if !r.regexp.MatchString(rv) {
+ // The URL is checked against the full regexp, instead of checking
+ // individual variables. This is faster but to provide a good error
+ // message, we check individual regexps if the URL doesn't match.
+ for k, v := range r.varsN {
+ if !r.varsR[k].MatchString(values[v]) {
+ return "", fmt.Errorf(
+ "mux: variable %q doesn't match, expected %q", values[v],
+ r.varsR[k].String())
+ }
+ }
+ }
+ return rv, nil
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
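+//
+// For example (illustrative): braceIndices("/{foo}/{bar:[0-9]+}") returns
+// [1, 6, 7, 19], so that s[1:6] == "{foo}" and s[7:19] == "{bar:[0-9]+}".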
+func braceIndices(s string) ([]int, error) {
+ var level, idx int
+ idxs := make([]int, 0)
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '{':
+ if level++; level == 1 {
+ idx = i
+ }
+ case '}':
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ }
+ }
+ if level != 0 {
+ return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+ }
+ return idxs, nil
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+ host *routeRegexp
+ path *routeRegexp
+ queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
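+//
+// For example (behavior derived from the code below): with strictSlash
+// enabled and a path template "/path/", a request for "/path" has its
+// variables stored and m.Handler set to a 301 redirect to "/path/", and
+// vice versa when the template lacks the trailing slash.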
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+ // Store host variables.
+ if v.host != nil {
+ hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
+ if hostVars != nil {
+ for k, v := range v.host.varsN {
+ m.Vars[v] = hostVars[k+1]
+ }
+ }
+ }
+ // Store path variables.
+ if v.path != nil {
+ pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
+ if pathVars != nil {
+ for k, v := range v.path.varsN {
+ m.Vars[v] = pathVars[k+1]
+ }
+ // Check if we should redirect.
+ if v.path.strictSlash {
+ p1 := strings.HasSuffix(req.URL.Path, "/")
+ p2 := strings.HasSuffix(v.path.template, "/")
+ if p1 != p2 {
+ u, _ := url.Parse(req.URL.String())
+ if p1 {
+ u.Path = u.Path[:len(u.Path)-1]
+ } else {
+ u.Path += "/"
+ }
+ m.Handler = http.RedirectHandler(u.String(), 301)
+ }
+ }
+ }
+ }
+ // Store query string variables.
+ rawQuery := req.URL.RawQuery
+ for _, q := range v.queries {
+ queryVars := q.regexp.FindStringSubmatch(rawQuery)
+ if queryVars != nil {
+ for k, v := range q.varsN {
+ m.Vars[v] = queryVars[k+1]
+ }
+ }
+ }
+}
+
+// getHost tries its best to return the request host.
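+// For example, a request with Host "example.com:8080" yields "example.com";
+// an absolute request URL takes precedence over the Host header.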
+func getHost(r *http.Request) string {
+ if r.URL.IsAbs() {
+ return r.URL.Host
+ }
+ host := r.Host
+ // Slice off any port information.
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ return host
+
+}
diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go
new file mode 100644
index 00000000..c310e66b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gorilla/mux/route.go
@@ -0,0 +1,524 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+ // Parent where the route was registered (a Router).
+ parent parentRoute
+ // Request handler for the route.
+ handler http.Handler
+ // List of matchers.
+ matchers []matcher
+ // Manager for the variables from host and path.
+ regexp *routeRegexpGroup
+ // If true, when the path pattern is "/path/", accessing "/path" will
+ // redirect to the former and vice versa.
+ strictSlash bool
+ // If true, this route never matches: it is only used to build URLs.
+ buildOnly bool
+ // The name used to build URLs.
+ name string
+ // Error resulted from building a route.
+ err error
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+ if r.buildOnly || r.err != nil {
+ return false
+ }
+ // Match everything.
+ for _, m := range r.matchers {
+ if matched := m.Match(req, match); !matched {
+ return false
+ }
+ }
+ // Yay, we have a match. Let's collect some info about it.
+ if match.Route == nil {
+ match.Route = r
+ }
+ if match.Handler == nil {
+ match.Handler = r.handler
+ }
+ if match.Vars == nil {
+ match.Vars = make(map[string]string)
+ }
+ // Set variables.
+ if r.regexp != nil {
+ r.regexp.setMatch(req, match, r)
+ }
+ return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+ return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+ r.buildOnly = true
+ return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+ if r.err == nil {
+ r.handler = handler
+ }
+ return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+ return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+ return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+ if r.name != "" {
+ r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+ r.name, name)
+ }
+ if r.err == nil {
+ r.name = name
+ r.getNamedRoutes()[name] = r
+ }
+ return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+ return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+ Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+ if r.err == nil {
+ r.matchers = append(r.matchers, m)
+ }
+ return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ r.regexp = r.getRegexpGroup()
+ if !matchHost && !matchQuery {
+ if len(tpl) == 0 || tpl[0] != '/' {
+ return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+ }
+ if r.regexp.path != nil {
+ tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+ }
+ }
+ rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+ if err != nil {
+ return err
+ }
+ for _, q := range r.regexp.queries {
+ if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+ return err
+ }
+ }
+ if matchHost {
+ if r.regexp.path != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+ return err
+ }
+ }
+ r.regexp.host = rr
+ } else {
+ if r.regexp.host != nil {
+ if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+ return err
+ }
+ }
+ if matchQuery {
+ r.regexp.queries = append(r.regexp.queries, rr)
+ } else {
+ r.regexp.path = rr
+ }
+ }
+ r.addMatcher(rr)
+ return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchMap(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+// r := mux.NewRouter()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+//
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+ if r.err == nil {
+ var headers map[string]string
+ headers, r.err = mapFromPairs(pairs...)
+ return r.addMatcher(headerMatcher(headers))
+ }
+ return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Host("www.domain.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, true, false, false)
+ return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+ return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+ return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+ for k, v := range methods {
+ methods[k] = strings.ToUpper(v)
+ }
+ return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+// r := mux.NewRouter()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, false, false)
+ return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
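+//
+// For example (an illustrative sketch, not upstream documentation;
+// fileServer stands for any http.Handler that serves files):
+//
+//     r := mux.NewRouter()
+//     r.PathPrefix("/static/").
+//         Handler(http.StripPrefix("/static/", fileServer))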
+func (r *Route) PathPrefix(tpl string) *Route {
+ r.err = r.addRegexpMatcher(tpl, false, true, false)
+ return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+// r := mux.NewRouter()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next "&".
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+ length := len(pairs)
+ if length%2 != 0 {
+ r.err = fmt.Errorf(
+ "mux: number of parameters must be multiple of 2, got %v", pairs)
+ return nil
+ }
+ for i := 0; i < length; i += 2 {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil {
+ return r
+ }
+ }
+
+ return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+ return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+ for k, v := range schemes {
+ schemes[k] = strings.ToLower(v)
+ }
+ return r.addMatcher(schemeMatcher(schemes))
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+// r := mux.NewRouter()
+// s := r.Host("www.domain.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+ router := &Router{parent: r, strictSlash: r.strictSlash}
+ r.addMatcher(router)
+ return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// ...a URL for it can be built using:
+//
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+// "/articles/technology/42"
+//
+// This also works for host variables:
+//
+// r := mux.NewRouter()
+// r.Host("{subdomain}.domain.com").
+// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
+//
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil {
+ return nil, errors.New("mux: route doesn't have a host or path")
+ }
+ var scheme, host, path string
+ var err error
+ if r.regexp.host != nil {
+ // Set a default scheme.
+ scheme = "http"
+ if host, err = r.regexp.host.url(pairs...); err != nil {
+ return nil, err
+ }
+ }
+ if r.regexp.path != nil {
+ if path, err = r.regexp.path.url(pairs...); err != nil {
+ return nil, err
+ }
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ }, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.host == nil {
+ return nil, errors.New("mux: route doesn't have a host")
+ }
+ host, err := r.regexp.host.url(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host,
+ }, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.regexp == nil || r.regexp.path == nil {
+ return nil, errors.New("mux: route doesn't have a path")
+ }
+ path, err := r.regexp.path.url(pairs...)
+ if err != nil {
+ return nil, err
+ }
+ return &url.URL{
+ Path: path,
+ }, nil
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+ getNamedRoutes() map[string]*Route
+ getRegexpGroup() *routeRegexpGroup
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+ if r.regexp == nil {
+ if r.parent == nil {
+ // During tests router is not always set.
+ r.parent = NewRouter()
+ }
+ regexp := r.parent.getRegexpGroup()
+ if regexp == nil {
+ r.regexp = new(routeRegexpGroup)
+ } else {
+ // Copy.
+ r.regexp = &routeRegexpGroup{
+ host: regexp.host,
+ path: regexp.path,
+ queries: regexp.queries,
+ }
+ }
+ }
+ return r.regexp
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore
new file mode 100644
index 00000000..83c8f823
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE
new file mode 100644
index 00000000..363fa9ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md
new file mode 100644
index 00000000..e0091a4b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/README.md
@@ -0,0 +1,104 @@
+go-metrics
+==========
+
+Go port of Coda Hale's Metrics library: <https://github.com/codahale/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
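+
+If the same metric may be created from more than one code path, the
+GetOrRegister helpers return the existing metric or construct and register a
+new one (a minimal sketch; the name "requests" is illustrative):
+
+```go
+c := metrics.GetOrRegisterCounter("requests", metrics.DefaultRegistry)
+c.Inc(1)
+```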
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite:
+
+```go
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+```go
+import "github.com/rcrowley/go-metrics/influxdb"
+
+go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
+ Host: "127.0.0.1:8086",
+ Database: "metrics",
+ Username: "test",
+ Password: "test",
+})
+```
+
+Periodically upload every metric to Librato:
+
+```go
+import "github.com/rcrowley/go-metrics/librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+ 10e9, // interval
+ "example@example.com", // account owner email address
+ "token", // Librato API token
+ "hostname", // source
+ []float64{0.95}, // percentiles to send
+ time.Millisecond, // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go
new file mode 100644
index 00000000..dddaf4b1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-bench/metrics-bench.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "fmt"
+ "github.com/rcrowley/go-metrics"
+ "time"
+)
+
+func main() {
+ r := metrics.NewRegistry()
+ for i := 0; i < 10000; i++ {
+ r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter())
+ r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge())
+ r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64())
+ r.Register(fmt.Sprintf("histogram-uniform-%d", i), metrics.NewHistogram(metrics.NewUniformSample(1028)))
+ r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))
+ r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter())
+ }
+ time.Sleep(600e9)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go
new file mode 100644
index 00000000..66f42c04
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/metrics-example/metrics-example.go
@@ -0,0 +1,154 @@
+package main
+
+import (
+ "errors"
+ "github.com/rcrowley/go-metrics"
+ // "github.com/rcrowley/go-metrics/stathat"
+ "log"
+ "math/rand"
+ "os"
+ // "syslog"
+ "time"
+)
+
+const fanout = 10
+
+func main() {
+
+ r := metrics.NewRegistry()
+
+ c := metrics.NewCounter()
+ r.Register("foo", c)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ c.Dec(19)
+ time.Sleep(300e6)
+ }
+ }()
+ go func() {
+ for {
+ c.Inc(47)
+ time.Sleep(400e6)
+ }
+ }()
+ }
+
+ g := metrics.NewGauge()
+ r.Register("bar", g)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ g.Update(19)
+ time.Sleep(300e6)
+ }
+ }()
+ go func() {
+ for {
+ g.Update(47)
+ time.Sleep(400e6)
+ }
+ }()
+ }
+
+ gf := metrics.NewGaugeFloat64()
+ r.Register("barfloat64", gf)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ gf.Update(19.0)
+ time.Sleep(300e6)
+ }
+ }()
+ go func() {
+ for {
+ gf.Update(47.0)
+ time.Sleep(400e6)
+ }
+ }()
+ }
+
+ hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
+ if 0 < rand.Intn(2) {
+ h.Healthy()
+ } else {
+ h.Unhealthy(errors.New("baz"))
+ }
+ })
+ r.Register("baz", hc)
+
+ s := metrics.NewExpDecaySample(1028, 0.015)
+ //s := metrics.NewUniformSample(1028)
+ h := metrics.NewHistogram(s)
+ r.Register("bang", h)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ h.Update(19)
+ time.Sleep(300e6)
+ }
+ }()
+ go func() {
+ for {
+ h.Update(47)
+ time.Sleep(400e6)
+ }
+ }()
+ }
+
+ m := metrics.NewMeter()
+ r.Register("quux", m)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ m.Mark(19)
+ time.Sleep(300e6)
+ }
+ }()
+ go func() {
+ for {
+ m.Mark(47)
+ time.Sleep(400e6)
+ }
+ }()
+ }
+
+ t := metrics.NewTimer()
+ r.Register("hooah", t)
+ for i := 0; i < fanout; i++ {
+ go func() {
+ for {
+ t.Time(func() { time.Sleep(300e6) })
+ }
+ }()
+ go func() {
+ for {
+ t.Time(func() { time.Sleep(400e6) })
+ }
+ }()
+ }
+
+ metrics.RegisterDebugGCStats(r)
+ go metrics.CaptureDebugGCStats(r, 5e9)
+
+ metrics.RegisterRuntimeMemStats(r)
+ go metrics.CaptureRuntimeMemStats(r, 5e9)
+
+ metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+
+ /*
+ w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+ if nil != err { log.Fatalln(err) }
+ metrics.Syslog(r, 60e9, w)
+ */
+
+ /*
+ addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+ metrics.Graphite(r, 10e9, "metrics", addr)
+ */
+
+ /*
+ stathat.Stathat(r, 10e9, "example@example.com")
+ */
+
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go
new file mode 100644
index 00000000..dc175b77
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/cmd/never-read/never-read.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "log"
+ "net"
+)
+
+func main() {
+ addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+ l, err := net.ListenTCP("tcp", addr)
+ if nil != err {
+ log.Fatalln(err)
+ }
+ log.Println("listening", l.Addr())
+ for {
+ c, err := l.AcceptTCP()
+ if nil != err {
+ log.Fatalln(err)
+ }
+ log.Println("accepted", c.RemoteAddr())
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go
new file mode 100644
index 00000000..bb7b039c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+ Clear()
+ Count() int64
+ Dec(int64)
+ Inc(int64)
+ Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+ if UseNilMetrics {
+ return NilCounter{}
+ }
+ return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+ c := NewCounter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+ panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+ panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+ panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+ count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+ atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+ return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+ atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+ atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+ return CounterSnapshot(c.Count())
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go
new file mode 100644
index 00000000..dfb03b4e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/counter_test.go
@@ -0,0 +1,77 @@
+package metrics
+
+import "testing"
+
+func BenchmarkCounter(b *testing.B) {
+ c := NewCounter()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c.Inc(1)
+ }
+}
+
+func TestCounterClear(t *testing.T) {
+ c := NewCounter()
+ c.Inc(1)
+ c.Clear()
+ if count := c.Count(); 0 != count {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestCounterDec1(t *testing.T) {
+ c := NewCounter()
+ c.Dec(1)
+ if count := c.Count(); -1 != count {
+ t.Errorf("c.Count(): -1 != %v\n", count)
+ }
+}
+
+func TestCounterDec2(t *testing.T) {
+ c := NewCounter()
+ c.Dec(2)
+ if count := c.Count(); -2 != count {
+ t.Errorf("c.Count(): -2 != %v\n", count)
+ }
+}
+
+func TestCounterInc1(t *testing.T) {
+ c := NewCounter()
+ c.Inc(1)
+ if count := c.Count(); 1 != count {
+ t.Errorf("c.Count(): 1 != %v\n", count)
+ }
+}
+
+func TestCounterInc2(t *testing.T) {
+ c := NewCounter()
+ c.Inc(2)
+ if count := c.Count(); 2 != count {
+ t.Errorf("c.Count(): 2 != %v\n", count)
+ }
+}
+
+func TestCounterSnapshot(t *testing.T) {
+ c := NewCounter()
+ c.Inc(1)
+ snapshot := c.Snapshot()
+ c.Inc(1)
+ if count := snapshot.Count(); 1 != count {
+ t.Errorf("c.Count(): 1 != %v\n", count)
+ }
+}
+
+func TestCounterZero(t *testing.T) {
+ c := NewCounter()
+ if count := c.Count(); 0 != count {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestGetOrRegisterCounter(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredCounter("foo", r).Inc(47)
+ if c := GetOrRegisterCounter("foo", r); 47 != c.Count() {
+ t.Fatal(c)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go
new file mode 100644
index 00000000..043ccefa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "runtime/debug"
+ "time"
+)
+
+var (
+ debugMetrics struct {
+ GCStats struct {
+ LastGC Gauge
+ NumGC Gauge
+ Pause Histogram
+ //PauseQuantiles Histogram
+ PauseTotal Gauge
+ }
+ ReadGCStats Timer
+ }
+ gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called as a goroutine.
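+// Typical use: go CaptureDebugGCStats(r, time.Minute).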
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. Unlike CaptureDebugGCStats, this captures a single
+// snapshot. Passing a registry that has not been passed to
+// RegisterDebugGCStats will panic.
+//
+// Be careful with this (though much less so than with the runtime memory
+// stats) because debug.ReadGCStats calls the C function
+// runtime·lock(runtime·mheap) which, while not a stop-the-world operation,
+// isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+ lastGC := gcStats.LastGC
+ t := time.Now()
+ debug.ReadGCStats(&gcStats)
+ debugMetrics.ReadGCStats.UpdateSince(t)
+
+ debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+ debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
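+ // Only record a pause if a collection actually ran since the last
+ // capture; gcStats.Pause[0] holds the most recent pause duration.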
+ if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+ debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+ }
+ //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+ debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+ debugMetrics.GCStats.LastGC = NewGauge()
+ debugMetrics.GCStats.NumGC = NewGauge()
+ debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+ //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+ debugMetrics.GCStats.PauseTotal = NewGauge()
+ debugMetrics.ReadGCStats = NewTimer()
+
+ r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+ r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+ r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+ //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+ r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+ r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+ gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go
new file mode 100644
index 00000000..07eb8678
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/debug_test.go
@@ -0,0 +1,48 @@
+package metrics
+
+import (
+ "runtime"
+ "runtime/debug"
+ "testing"
+ "time"
+)
+
+func BenchmarkDebugGCStats(b *testing.B) {
+ r := NewRegistry()
+ RegisterDebugGCStats(r)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+func TestDebugGCStatsBlocking(t *testing.T) {
+ if g := runtime.GOMAXPROCS(0); g < 2 {
+ t.Skipf("skipping TestDebugGCStatsBlocking with GOMAXPROCS=%d\n", g)
+ return
+ }
+ ch := make(chan int)
+ go testDebugGCStatsBlocking(ch)
+ var gcStats debug.GCStats
+ t0 := time.Now()
+ debug.ReadGCStats(&gcStats)
+ t1 := time.Now()
+ t.Log("i++ during debug.ReadGCStats:", <-ch)
+ go testDebugGCStatsBlocking(ch)
+ d := t1.Sub(t0)
+ t.Log(d)
+ time.Sleep(d)
+ t.Log("i++ during time.Sleep:", <-ch)
+}
+
+func testDebugGCStatsBlocking(ch chan int) {
+ i := 0
+ for {
+ select {
+ case ch <- i:
+ return
+ default:
+ i++
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go
new file mode 100644
index 00000000..7c152a17
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma.go
@@ -0,0 +1,118 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+ Rate() float64
+ Snapshot() EWMA
+ Tick()
+ Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+ if UseNilMetrics {
+ return NilEWMA{}
+ }
+ return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
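+ // alpha = 1 - e^(-interval/window): a 5-second tick interval against a
+ // 1-minute window, with both expressed in minutes (5/60 and 1).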
+ return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+ panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+ panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.rate * float64(1e9)
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+ return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
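+ // Atomically drain the events accumulated since the last tick, then fold
+ // their instantaneous rate (events per nanosecond over the assumed
+ // 5-second interval) into the moving average.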
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5e9)
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if a.init {
+ a.rate += a.alpha * (instantRate - a.rate)
+ } else {
+ a.init = true
+ a.rate = instantRate
+ }
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go
new file mode 100644
index 00000000..0430fbd2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/ewma_test.go
@@ -0,0 +1,225 @@
+package metrics
+
+import "testing"
+
+func BenchmarkEWMA(b *testing.B) {
+ a := NewEWMA1()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ a.Update(1)
+ a.Tick()
+ }
+}
+
+func TestEWMA1(t *testing.T) {
+ a := NewEWMA1()
+ a.Update(3)
+ a.Tick()
+ if rate := a.Rate(); 0.6 != rate {
+ t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.22072766470286553 != rate {
+ t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.08120116994196772 != rate {
+ t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.029872241020718428 != rate {
+ t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.01098938333324054 != rate {
+ t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.004042768199451294 != rate {
+ t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.0014872513059998212 != rate {
+ t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.0005471291793327122 != rate {
+ t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.00020127757674150815 != rate {
+ t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 7.404588245200814e-05 != rate {
+ t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 2.7239957857491083e-05 != rate {
+ t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 1.0021020474147462e-05 != rate {
+ t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 3.6865274119969525e-06 != rate {
+ t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 1.3561976441886433e-06 != rate {
+ t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 4.989172314621449e-07 != rate {
+ t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 1.8354139230109722e-07 != rate {
+ t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
+ }
+}
+
+func TestEWMA5(t *testing.T) {
+ a := NewEWMA5()
+ a.Update(3)
+ a.Tick()
+ if rate := a.Rate(); 0.6 != rate {
+ t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.49123845184678905 != rate {
+ t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.4021920276213837 != rate {
+ t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.32928698165641596 != rate {
+ t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.269597378470333 != rate {
+ t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.2207276647028654 != rate {
+ t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.18071652714732128 != rate {
+ t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.14795817836496392 != rate {
+ t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.12113791079679326 != rate {
+ t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.09917933293295193 != rate {
+ t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.08120116994196763 != rate {
+ t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.06648189501740036 != rate {
+ t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.05443077197364752 != rate {
+ t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.04456414692860035 != rate {
+ t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.03648603757513079 != rate {
+ t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
+ t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
+ }
+}
+
+func TestEWMA15(t *testing.T) {
+ a := NewEWMA15()
+ a.Update(3)
+ a.Tick()
+ if rate := a.Rate(); 0.6 != rate {
+ t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.5613041910189706 != rate {
+ t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.5251039914257684 != rate {
+ t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.4912384518467888184678905 != rate {
+ t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.459557003018789 != rate {
+ t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.4299187863442732 != rate {
+ t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.4021920276213831 != rate {
+ t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.37625345116383313 != rate {
+ t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.3519877317060185 != rate {
+ t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.3292869816564153165641596 != rate {
+ t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.3080502714195546 != rate {
+ t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.2881831806538789 != rate {
+ t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.26959737847033216 != rate {
+ t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.2522102307052083 != rate {
+ t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.23594443252115815 != rate {
+ t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
+ }
+ elapseMinute(a)
+ if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
+ t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
+ }
+}
+
+func elapseMinute(a EWMA) {
+ for i := 0; i < 12; i++ {
+ a.Tick()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go
new file mode 100644
index 00000000..807638a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge.go
@@ -0,0 +1,84 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+ Snapshot() Gauge
+ Update(int64)
+ Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+ c := NewGauge()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+ panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go
new file mode 100644
index 00000000..47c3566c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64.go
@@ -0,0 +1,91 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ mutex sync.Mutex
+ value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.value
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go
new file mode 100644
index 00000000..5d0aae27
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_float64_test.go
@@ -0,0 +1,38 @@
+package metrics
+
+import "testing"
+
+func BenchmarkGaugeFloat64(b *testing.B) {
+ g := NewGaugeFloat64()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ g.Update(float64(i))
+ }
+}
+
+func TestGaugeFloat64(t *testing.T) {
+ g := NewGaugeFloat64()
+ g.Update(float64(47.0))
+ if v := g.Value(); float64(47.0) != v {
+ t.Errorf("g.Value(): 47.0 != %v\n", v)
+ }
+}
+
+func TestGaugeFloat64Snapshot(t *testing.T) {
+ g := NewGaugeFloat64()
+ g.Update(float64(47.0))
+ snapshot := g.Snapshot()
+ g.Update(float64(0))
+ if v := snapshot.Value(); float64(47.0) != v {
+ t.Errorf("g.Value(): 47.0 != %v\n", v)
+ }
+}
+
+func TestGetOrRegisterGaugeFloat64(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0))
+ t.Logf("registry: %v", r)
+ if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() {
+ t.Fatal(g)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go
new file mode 100644
index 00000000..50849629
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/gauge_test.go
@@ -0,0 +1,37 @@
+package metrics
+
+import "testing"
+
+func BenchmarkGauge(b *testing.B) {
+ g := NewGauge()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ g.Update(int64(i))
+ }
+}
+
+func TestGauge(t *testing.T) {
+ g := NewGauge()
+ g.Update(int64(47))
+ if v := g.Value(); 47 != v {
+ t.Errorf("g.Value(): 47 != %v\n", v)
+ }
+}
+
+func TestGaugeSnapshot(t *testing.T) {
+ g := NewGauge()
+ g.Update(int64(47))
+ snapshot := g.Snapshot()
+ g.Update(int64(0))
+ if v := snapshot.Value(); 47 != v {
+ t.Errorf("g.Value(): 47 != %v\n", v)
+ }
+}
+
+func TestGetOrRegisterGauge(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredGauge("foo", r).Update(47)
+ if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
+ t.Fatal(g)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go
new file mode 100644
index 00000000..643b3ec5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite.go
@@ -0,0 +1,104 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
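+// Typical use: go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr),
+// as shown in graphite_test.go.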
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := graphite(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+func graphite(c *GraphiteConfig) error {
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
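+ // Each value is written in Graphite's plaintext protocol:
+ // "<path> <value> <unix-timestamp>\n".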
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case Gauge:
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, int64(du)*t.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, int64(du)*t.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, du*t.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, du*t.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go
new file mode 100644
index 00000000..b49dc4bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/graphite_test.go
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+ "net"
+ "time"
+)
+
+func ExampleGraphite() {
+ addr, _ := net.ResolveTCPAddr("tcp", ":2003")
+ go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
+}
+
+func ExampleGraphiteWithConfig() {
+ addr, _ := net.ResolveTCPAddr("tcp", ":2003")
+ go GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: DefaultRegistry,
+ FlushInterval: 1 * time.Second,
+ DurationUnit: time.Millisecond,
+ Percentiles: []float64{0.5, 0.75, 0.99, 0.999},
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go
new file mode 100644
index 00000000..445131ca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+ Check()
+ Error() error
+ Healthy()
+ Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
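+// For example (a minimal sketch, adapted from cmd/metrics-example):
+//
+//	hc := NewHealthcheck(func(h Healthcheck) { h.Healthy() })
+//	hc.Check()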
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+ if UseNilMetrics {
+ return NilHealthcheck{}
+ }
+ return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+ err error
+ f func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+ h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+ return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+ h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy. The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+ h.err = err
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go
new file mode 100644
index 00000000..7f3ee70c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram.go
@@ -0,0 +1,192 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Sample() Sample
+ Snapshot() Histogram
+ StdDev() float64
+ Update(int64)
+ Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
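+ // Wrapping the constructor in a closure lets GetOrRegister build a new
+ // histogram around s only when the name is not already registered.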
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+ if UseNilMetrics {
+ return NilHistogram{}
+ }
+ return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+ c := NewHistogram(s)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+ sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+ panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+ panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+ sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+ return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go
new file mode 100644
index 00000000..d7f4f017
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/histogram_test.go
@@ -0,0 +1,95 @@
+package metrics
+
+import "testing"
+
+func BenchmarkHistogram(b *testing.B) {
+ h := NewHistogram(NewUniformSample(100))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ h.Update(int64(i))
+ }
+}
+
+func TestGetOrRegisterHistogram(t *testing.T) {
+ r := NewRegistry()
+ s := NewUniformSample(100)
+ NewRegisteredHistogram("foo", r, s).Update(47)
+ if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() {
+ t.Fatal(h)
+ }
+}
+
+func TestHistogram10000(t *testing.T) {
+ h := NewHistogram(NewUniformSample(100000))
+ for i := 1; i <= 10000; i++ {
+ h.Update(int64(i))
+ }
+ testHistogram10000(t, h)
+}
+
+func TestHistogramEmpty(t *testing.T) {
+ h := NewHistogram(NewUniformSample(100))
+ if count := h.Count(); 0 != count {
+ t.Errorf("h.Count(): 0 != %v\n", count)
+ }
+ if min := h.Min(); 0 != min {
+ t.Errorf("h.Min(): 0 != %v\n", min)
+ }
+ if max := h.Max(); 0 != max {
+ t.Errorf("h.Max(): 0 != %v\n", max)
+ }
+ if mean := h.Mean(); 0.0 != mean {
+ t.Errorf("h.Mean(): 0.0 != %v\n", mean)
+ }
+ if stdDev := h.StdDev(); 0.0 != stdDev {
+ t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
+ }
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+ if 0.0 != ps[0] {
+ t.Errorf("median: 0.0 != %v\n", ps[0])
+ }
+ if 0.0 != ps[1] {
+ t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
+ }
+ if 0.0 != ps[2] {
+ t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
+ }
+}
+
+func TestHistogramSnapshot(t *testing.T) {
+ h := NewHistogram(NewUniformSample(100000))
+ for i := 1; i <= 10000; i++ {
+ h.Update(int64(i))
+ }
+ snapshot := h.Snapshot()
+ h.Update(0)
+ testHistogram10000(t, snapshot)
+}
+
+func testHistogram10000(t *testing.T, h Histogram) {
+ if count := h.Count(); 10000 != count {
+ t.Errorf("h.Count(): 10000 != %v\n", count)
+ }
+ if min := h.Min(); 1 != min {
+ t.Errorf("h.Min(): 1 != %v\n", min)
+ }
+ if max := h.Max(); 10000 != max {
+ t.Errorf("h.Max(): 10000 != %v\n", max)
+ }
+ if mean := h.Mean(); 5000.5 != mean {
+ t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
+ }
+ if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
+ t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
+ }
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
+ if 5000.5 != ps[0] {
+ t.Errorf("median: 5000.5 != %v\n", ps[0])
+ }
+ if 7500.75 != ps[1] {
+ t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
+ }
+ if 9900.99 != ps[2] {
+ t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go
new file mode 100644
index 00000000..0163c9b4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/influxdb/influxdb.go
@@ -0,0 +1,114 @@
+package influxdb
+
+import (
+ "fmt"
+ influxClient "github.com/influxdb/influxdb/client"
+ "github.com/rcrowley/go-metrics"
+ "log"
+ "time"
+)
+
+type Config struct {
+ Host string
+ Database string
+ Username string
+ Password string
+}
+
+func Influxdb(r metrics.Registry, d time.Duration, config *Config) {
+ client, err := influxClient.NewClient(&influxClient.ClientConfig{
+ Host: config.Host,
+ Database: config.Database,
+ Username: config.Username,
+ Password: config.Password,
+ })
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ for _ = range time.Tick(d) {
+ if err := send(r, client); err != nil {
+ log.Println(err)
+ }
+ }
+}
+
+func send(r metrics.Registry, client *influxClient.Client) error {
+ series := []*influxClient.Series{}
+
+ r.Each(func(name string, i interface{}) {
+ now := getCurrentTime()
+ switch metric := i.(type) {
+ case metrics.Counter:
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.count", name),
+ Columns: []string{"time", "count"},
+ Points: [][]interface{}{
+ {now, metric.Count()},
+ },
+ })
+ case metrics.Gauge:
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.value", name),
+ Columns: []string{"time", "value"},
+ Points: [][]interface{}{
+ {now, metric.Value()},
+ },
+ })
+ case metrics.GaugeFloat64:
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.value", name),
+ Columns: []string{"time", "value"},
+ Points: [][]interface{}{
+ {now, metric.Value()},
+ },
+ })
+ case metrics.Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.histogram", name),
+ Columns: []string{"time", "count", "min", "max", "mean", "std-dev",
+ "50-percentile", "75-percentile", "95-percentile",
+ "99-percentile", "999-percentile"},
+ Points: [][]interface{}{
+ {now, h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
+ ps[0], ps[1], ps[2], ps[3], ps[4]},
+ },
+ })
+ case metrics.Meter:
+ m := metric.Snapshot()
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.meter", name),
+ Columns: []string{"count", "one-minute",
+ "five-minute", "fifteen-minute", "mean"},
+ Points: [][]interface{}{
+ {m.Count(), m.Rate1(), m.Rate5(), m.Rate15(), m.RateMean()},
+ },
+ })
+ case metrics.Timer:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ series = append(series, &influxClient.Series{
+ Name: fmt.Sprintf("%s.timer", name),
+ Columns: []string{"count", "min", "max", "mean", "std-dev",
+ "50-percentile", "75-percentile", "95-percentile",
+ "99-percentile", "999-percentile", "one-minute", "five-minute", "fifteen-minute", "mean-rate"},
+ Points: [][]interface{}{
+ {h.Count(), h.Min(), h.Max(), h.Mean(), h.StdDev(),
+ ps[0], ps[1], ps[2], ps[3], ps[4],
+ h.Rate1(), h.Rate5(), h.Rate15(), h.RateMean()},
+ },
+ })
+ }
+ })
+ if err := client.WriteSeries(series); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+func getCurrentTime() int64 {
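+ // Milliseconds since the Unix epoch.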
+ return time.Now().UnixNano() / 1000000
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go
new file mode 100644
index 00000000..04a9c919
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json.go
@@ -0,0 +1,83 @@
+package metrics
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r StandardRegistry) MarshalJSON() ([]byte, error) {
+ data := make(map[string]map[string]interface{})
+ r.Each(func(name string, i interface{}) {
+ values := make(map[string]interface{})
+ switch metric := i.(type) {
+ case Counter:
+ values["count"] = metric.Count()
+ case Gauge:
+ values["value"] = metric.Value()
+ case GaugeFloat64:
+ values["value"] = metric.Value()
+ case Healthcheck:
+ values["error"] = nil
+ metric.Check()
+ if err := metric.Error(); nil != err {
+ values["error"] = metric.Error().Error()
+ }
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = h.Count()
+ values["min"] = h.Min()
+ values["max"] = h.Max()
+ values["mean"] = h.Mean()
+ values["stddev"] = h.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ case Meter:
+ m := metric.Snapshot()
+ values["count"] = m.Count()
+ values["1m.rate"] = m.Rate1()
+ values["5m.rate"] = m.Rate5()
+ values["15m.rate"] = m.Rate15()
+ values["mean.rate"] = m.RateMean()
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = t.Count()
+ values["min"] = t.Min()
+ values["max"] = t.Max()
+ values["mean"] = t.Mean()
+ values["stddev"] = t.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ values["1m.rate"] = t.Rate1()
+ values["5m.rate"] = t.Rate5()
+ values["15m.rate"] = t.Rate15()
+ values["mean.rate"] = t.RateMean()
+ }
+ data[name] = values
+ })
+ return json.Marshal(data)
+}
+
+// WriteJSON writes metrics from the given registry periodically to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteJSONOnce(r, w)
+ }
+}
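+
+// A usage sketch (illustrative): stream the default registry to os.Stdout
+// every five seconds.
+//
+//	go WriteJSON(DefaultRegistry, 5*time.Second, os.Stdout)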
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+ json.NewEncoder(w).Encode(r)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go
new file mode 100644
index 00000000..cf70051f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/json_test.go
@@ -0,0 +1,28 @@
+package metrics
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+)
+
+func TestRegistryMarshalJSON(t *testing.T) {
+ b := &bytes.Buffer{}
+ enc := json.NewEncoder(b)
+ r := NewRegistry()
+ r.Register("counter", NewCounter())
+ enc.Encode(r)
+ if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s {
+ t.Fatal(s)
+ }
+}
+
+func TestRegistryWriteJSONOnce(t *testing.T) {
+ r := NewRegistry()
+ r.Register("counter", NewCounter())
+ b := &bytes.Buffer{}
+ WriteJSONOnce(r, b)
+ if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
+ t.Fail()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go
new file mode 100644
index 00000000..8c0c850e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/client.go
@@ -0,0 +1,102 @@
+package librato
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+const Operations = "operations"
+const OperationsShort = "ops"
+
+type LibratoClient struct {
+ Email, Token string
+}
+
+// property strings
+const (
+ // display attributes
+ Color = "color"
+ DisplayMax = "display_max"
+ DisplayMin = "display_min"
+ DisplayUnitsLong = "display_units_long"
+ DisplayUnitsShort = "display_units_short"
+ DisplayStacked = "display_stacked"
+ DisplayTransform = "display_transform"
+ // special gauge display attributes
+ SummarizeFunction = "summarize_function"
+ Aggregate = "aggregate"
+
+ // metric keys
+ Name = "name"
+ Period = "period"
+ Description = "description"
+ DisplayName = "display_name"
+ Attributes = "attributes"
+
+ // measurement keys
+ MeasureTime = "measure_time"
+ Source = "source"
+ Value = "value"
+
+ // special gauge keys
+ Count = "count"
+ Sum = "sum"
+ Max = "max"
+ Min = "min"
+ SumSquares = "sum_squares"
+
+ // batch keys
+ Counters = "counters"
+ Gauges = "gauges"
+
+ MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
+)
+
+type Measurement map[string]interface{}
+type Metric map[string]interface{}
+
+type Batch struct {
+ Gauges []Measurement `json:"gauges,omitempty"`
+ Counters []Measurement `json:"counters,omitempty"`
+ MeasureTime int64 `json:"measure_time"`
+ Source string `json:"source"`
+}
+
+func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
+ var (
+ js []byte
+ req *http.Request
+ resp *http.Response
+ )
+
+ if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
+ return nil
+ }
+
+ if js, err = json.Marshal(batch); err != nil {
+ return
+ }
+
+ if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
+ return
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.SetBasicAuth(self.Email, self.Token)
+
+ if resp, err = http.DefaultClient.Do(req); err != nil {
+ return
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ var body []byte
+ if body, err = ioutil.ReadAll(resp.Body); err != nil {
+ body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
+ }
+ err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
+ }
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go
new file mode 100644
index 00000000..8cc35453
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/librato/librato.go
@@ -0,0 +1,244 @@
+package librato
+
+import (
+ "fmt"
+ "github.com/yvasiyarov/go-metrics"
+ "log"
+ "math"
+ "regexp"
+ "time"
+
+ //"github.com/rcrowley/go-metrics"
+)
+
+// a regexp for extracting the unit from time.Duration.String
+var unitRegexp = regexp.MustCompile("[^\\d]+$")
+
+// a helper that turns a time.Duration into librato display attributes for timer metrics
+func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
+ attrs = make(map[string]interface{})
+ attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
+ attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
+ return
+}
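+
+// For example (illustrative): translateTimerAttributes(time.Millisecond)
+// yields {"display_transform": "x/1000000", "display_units_short": "ms"},
+// since int64(time.Millisecond) is 1e6 nanoseconds and "ms" is the trailing
+// non-digit suffix of "1ms".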
+
+type Reporter struct {
+ Email, Token string
+ Source string
+ Interval time.Duration
+ Registry metrics.Registry
+ Percentiles []float64 // percentiles to report on histogram metrics
+ TimerAttributes map[string]interface{} // units in which timers will be displayed
+ MetricPrefix string
+}
+
+func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
+ return &Reporter{
+ Email: e,
+ Token: t,
+ Source: s,
+ Interval: d,
+ Registry: r,
+ Percentiles: p,
+ TimerAttributes: translateTimerAttributes(u),
+ }
+}
+
+func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
+ NewReporter(r, d, e, t, s, p, u).Run()
+}
+
+func (self *Reporter) Run() {
+ ticker := time.Tick(self.Interval)
+ metricsApi := &LibratoClient{self.Email, self.Token}
+ for now := range ticker {
+
+ var metrics Batch
+ var err error
+ if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
+ log.Printf("ERROR constructing librato request body %s", err)
+ }
+
+ if err := metricsApi.PostMetrics(metrics); err != nil {
+ log.Printf("ERROR sending metrics to librato %s", err)
+ }
+ }
+}
+
+// sumSquares reconstructs the sum of squared values of a metrics.Sample from
+// its count, mean and standard deviation, via the identity
+// sum(x^2) = n*stddev^2 + (n*mean)^2/n.
+// See http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
+func sumSquares(s metrics.Sample) float64 {
+ count := float64(s.Count())
+ sumSquared := math.Pow(count*s.Mean(), 2)
+ sumSquares := count*math.Pow(s.StdDev(), 2) + sumSquared/count
+ if math.IsNaN(sumSquares) {
+ return 0.0
+ }
+ return sumSquares
+}
+
+// sumSquaresTimer applies the same reconstruction to a metrics.Timer.
+func sumSquaresTimer(t metrics.Timer) float64 {
+ count := float64(t.Count())
+ sumSquared := math.Pow(count*t.Mean(), 2)
+ sumSquares := count*math.Pow(t.StdDev(), 2) + sumSquared/count
+ if math.IsNaN(sumSquares) {
+ return 0.0
+ }
+ return sumSquares
+}
+
+func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
+ snapshot = Batch{
+ MeasureTime: now.Unix(),
+ Source: self.Source,
+ }
+ snapshot.Gauges = make([]Measurement, 0)
+ snapshot.Counters = make([]Measurement, 0)
+ histogramGaugeCount := 1 + len(self.Percentiles)
+ r.Each(func(name string, metric interface{}) {
+
+ if self.MetricPrefix != "" {
+ name = self.MetricPrefix + name
+ }
+ measurement := Measurement{}
+ measurement[Period] = self.Interval.Seconds()
+
+ switch m := metric.(type) {
+ case metrics.Counter:
+ if m.Count() > 0 {
+ measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
+ measurement[Value] = float64(m.Count())
+ measurement[Attributes] = map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ }
+ snapshot.Counters = append(snapshot.Counters, measurement)
+ }
+ case metrics.Gauge:
+ measurement[Name] = name
+ measurement[Value] = float64(m.Value())
+ snapshot.Gauges = append(snapshot.Gauges, measurement)
+ case metrics.GaugeFloat64:
+ measurement[Name] = name
+ measurement[Value] = float64(m.Value())
+ snapshot.Gauges = append(snapshot.Gauges, measurement)
+ case metrics.Histogram:
+ if m.Count() > 0 {
+ gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
+ s := m.Sample()
+ measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
+ measurement[Count] = uint64(s.Count())
+ measurement[Sum] = s.Sum()
+ measurement[Max] = float64(s.Max())
+ measurement[Min] = float64(s.Min())
+ measurement[SumSquares] = sumSquares(s)
+ gauges[0] = measurement
+ for i, p := range self.Percentiles {
+ gauges[i+1] = Measurement{
+ Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
+ Value: s.Percentile(p),
+ Period: measurement[Period],
+ }
+ }
+ snapshot.Gauges = append(snapshot.Gauges, gauges...)
+ }
+ case metrics.Meter:
+ measurement[Name] = name
+ measurement[Value] = float64(m.Count())
+ snapshot.Counters = append(snapshot.Counters, measurement)
+ snapshot.Gauges = append(snapshot.Gauges,
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "1min"),
+ Value: m.Rate1(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "5min"),
+ Value: m.Rate5(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "15min"),
+ Value: m.Rate15(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ )
+ case metrics.Timer:
+ measurement[Name] = name
+ measurement[Value] = float64(m.Count())
+ snapshot.Counters = append(snapshot.Counters, measurement)
+ if m.Count() > 0 {
+ libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
+ gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
+ gauges[0] = Measurement{
+ Name: libratoName,
+ Count: uint64(m.Count()),
+ Sum: m.Mean() * float64(m.Count()),
+ Max: float64(m.Max()),
+ Min: float64(m.Min()),
+ SumSquares: sumSquaresTimer(m),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: self.TimerAttributes,
+ }
+ for i, p := range self.Percentiles {
+ gauges[i+1] = Measurement{
+ Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
+ Value: m.Percentile(p),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: self.TimerAttributes,
+ }
+ }
+ snapshot.Gauges = append(snapshot.Gauges, gauges...)
+ snapshot.Gauges = append(snapshot.Gauges,
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
+ Value: m.Rate1(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
+ Value: m.Rate5(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ Measurement{
+ Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
+ Value: m.Rate15(),
+ Period: int64(self.Interval.Seconds()),
+ Attributes: map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ },
+ },
+ )
+ }
+ }
+ })
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go
new file mode 100644
index 00000000..278a8a44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/log.go
@@ -0,0 +1,70 @@
+package metrics
+
+import (
+ "log"
+ "time"
+)
+
+// Output each metric in the given registry periodically using the given
+// logger.
+func Log(r Registry, d time.Duration, l *log.Logger) {
+ for _ = range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %9d\n", metric.Count())
+ case Gauge:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ l.Printf("healthcheck %s\n", name)
+ l.Printf(" error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("histogram %s\n", name)
+ l.Printf(" count: %9d\n", h.Count())
+ l.Printf(" min: %9d\n", h.Min())
+ l.Printf(" max: %9d\n", h.Max())
+ l.Printf(" mean: %12.2f\n", h.Mean())
+ l.Printf(" stddev: %12.2f\n", h.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ l.Printf("meter %s\n", name)
+ l.Printf(" count: %9d\n", m.Count())
+ l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
+ l.Printf(" mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("timer %s\n", name)
+ l.Printf(" count: %9d\n", t.Count())
+ l.Printf(" min: %9d\n", t.Min())
+ l.Printf(" max: %9d\n", t.Max())
+ l.Printf(" mean: %12.2f\n", t.Mean())
+ l.Printf(" stddev: %12.2f\n", t.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
+ l.Printf(" mean rate: %12.2f\n", t.RateMean())
+ }
+ })
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md
new file mode 100644
index 00000000..47454f54
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+ time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak: 42604 kB
+VmSize: 42604 kB
+VmLck: 0 kB
+VmHWM: 1120 kB
+VmRSS: 1120 kB
+VmData: 35460 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 36 kB
+VmSwap: 0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+ "fmt"
+ "metrics"
+ "time"
+)
+
+func main() {
+ fmt.Sprintf("foo")
+ metrics.NewRegistry()
+ time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak: 44016 kB
+VmSize: 44016 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
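+
+Each figure is the delta from the baseline divided by the number of metrics
+registered, e.g. (44016 - 42604) kB / 1000 = 1.412 kB virtual and
+(1928 - 1120) kB / 1000 = 0.808 kB resident per counter.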
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak: 55024 kB
+VmSize: 55024 kB
+VmLck: 0 kB
+VmHWM: 12440 kB
+VmRSS: 12440 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak: 44012 kB
+VmSize: 44012 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak: 55020 kB
+VmSize: 55020 kB
+VmLck: 0 kB
+VmHWM: 12432 kB
+VmRSS: 12432 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 60 kB
+VmSwap: 0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak: 72272 kB
+VmSize: 72272 kB
+VmLck: 0 kB
+VmHWM: 16204 kB
+VmRSS: 16204 kB
+VmData: 65100 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 80 kB
+VmSwap: 0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 256912 kB
+VmSize: 256912 kB
+VmLck: 0 kB
+VmHWM: 146204 kB
+VmRSS: 146204 kB
+VmData: 249740 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 448 kB
+VmSwap: 0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 908112 kB
+VmSize: 908112 kB
+VmLck: 0 kB
+VmHWM: 645832 kB
+VmRSS: 645588 kB
+VmData: 900940 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1716 kB
+VmSwap: 1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak: 62480 kB
+VmSize: 62480 kB
+VmLck: 0 kB
+VmHWM: 11572 kB
+VmRSS: 11572 kB
+VmData: 55308 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go
new file mode 100644
index 00000000..0389ab0b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter.go
@@ -0,0 +1,233 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Count() int64
+ Mark(int64)
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Meter
+}
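+
+// A usage sketch (illustrative):
+//
+//	m := NewMeter()
+//	m.Mark(1)     // record one event
+//	_ = m.Rate1() // one-minute EWMA rate, in events per second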
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+func NewMeter() Meter {
+ if UseNilMetrics {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ arbiter.Lock()
+ defer arbiter.Unlock()
+ arbiter.meters = append(arbiter.meters, m)
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+func NewRegisteredMeter(name string, r Registry) Meter {
+ c := NewMeter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+ count int64
+ rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ lock sync.RWMutex
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ m.lock.RLock()
+ count := m.snapshot.count
+ m.lock.RUnlock()
+ return count
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.snapshot.count += n
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ m.lock.RLock()
+ rate1 := m.snapshot.rate1
+ m.lock.RUnlock()
+ return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ m.lock.RLock()
+ rate5 := m.snapshot.rate5
+ m.lock.RUnlock()
+ return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ m.lock.RLock()
+ rate15 := m.snapshot.rate15
+ m.lock.RUnlock()
+ return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ m.lock.RLock()
+ rateMean := m.snapshot.rateMean
+ m.lock.RUnlock()
+ return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+ m.lock.RLock()
+ snapshot := *m.snapshot
+ m.lock.RUnlock()
+ return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+ // should run with write lock held on m.lock
+ snapshot := m.snapshot
+ snapshot.rate1 = m.a1.Rate()
+ snapshot.rate5 = m.a5.Rate()
+ snapshot.rate15 = m.a15.Rate()
+ snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.a1.Tick()
+ m.a5.Tick()
+ m.a15.Tick()
+ m.updateSnapshot()
+}
+
+type meterArbiter struct {
+ sync.RWMutex
+ started bool
+ meters []*StandardMeter
+ ticker *time.Ticker
+}
+
+// arbiter drives every StandardMeter, ticking all registered meters once
+// every five seconds (5e9 nanoseconds).
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+ for {
+ select {
+ case <-ma.ticker.C:
+ ma.tickMeters()
+ }
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.RLock()
+ defer ma.RUnlock()
+ for _, meter := range ma.meters {
+ meter.tick()
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go
new file mode 100644
index 00000000..26ce1398
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/meter_test.go
@@ -0,0 +1,60 @@
+package metrics
+
+import (
+ "testing"
+ "time"
+)
+
+func BenchmarkMeter(b *testing.B) {
+ m := NewMeter()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Mark(1)
+ }
+}
+
+func TestGetOrRegisterMeter(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredMeter("foo", r).Mark(47)
+ if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
+ t.Fatal(m)
+ }
+}
+
+func TestMeterDecay(t *testing.T) {
+ ma := meterArbiter{
+ ticker: time.NewTicker(1),
+ }
+ m := newStandardMeter()
+ ma.meters = append(ma.meters, m)
+ go ma.tick()
+ m.Mark(1)
+ rateMean := m.RateMean()
+ time.Sleep(1)
+ if m.RateMean() >= rateMean {
+ t.Error("m.RateMean() didn't decrease")
+ }
+}
+
+func TestMeterNonzero(t *testing.T) {
+ m := NewMeter()
+ m.Mark(3)
+ if count := m.Count(); 3 != count {
+ t.Errorf("m.Count(): 3 != %v\n", count)
+ }
+}
+
+func TestMeterSnapshot(t *testing.T) {
+ m := NewMeter()
+ m.Mark(1)
+ if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
+ t.Fatal(snapshot)
+ }
+}
+
+func TestMeterZero(t *testing.T) {
+ m := NewMeter()
+ if count := m.Count(); 0 != count {
+ t.Errorf("m.Count(): 0 != %v\n", count)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go
new file mode 100644
index 00000000..b97a49ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go
new file mode 100644
index 00000000..083f9676
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/metrics_test.go
@@ -0,0 +1,107 @@
+package metrics
+
+import (
+ "io/ioutil"
+ "log"
+ "sync"
+ "testing"
+)
+
+const FANOUT = 128
+
+// Stop the compiler from complaining during debugging.
+var (
+ _ = ioutil.Discard
+ _ = log.LstdFlags
+)
+
+func BenchmarkMetrics(b *testing.B) {
+ r := NewRegistry()
+ c := NewRegisteredCounter("counter", r)
+ g := NewRegisteredGauge("gauge", r)
+ gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
+ h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
+ m := NewRegisteredMeter("meter", r)
+ t := NewRegisteredTimer("timer", r)
+ RegisterDebugGCStats(r)
+ RegisterRuntimeMemStats(r)
+ b.ResetTimer()
+ ch := make(chan bool)
+
+ wgD := &sync.WaitGroup{}
+ /*
+ wgD.Add(1)
+ go func() {
+ defer wgD.Done()
+ //log.Println("go CaptureDebugGCStats")
+ for {
+ select {
+ case <-ch:
+ //log.Println("done CaptureDebugGCStats")
+ return
+ default:
+ CaptureDebugGCStatsOnce(r)
+ }
+ }
+ }()
+ //*/
+
+ wgR := &sync.WaitGroup{}
+ //*
+ wgR.Add(1)
+ go func() {
+ defer wgR.Done()
+ //log.Println("go CaptureRuntimeMemStats")
+ for {
+ select {
+ case <-ch:
+ //log.Println("done CaptureRuntimeMemStats")
+ return
+ default:
+ CaptureRuntimeMemStatsOnce(r)
+ }
+ }
+ }()
+ //*/
+
+ wgW := &sync.WaitGroup{}
+ /*
+ wgW.Add(1)
+ go func() {
+ defer wgW.Done()
+ //log.Println("go Write")
+ for {
+ select {
+ case <-ch:
+ //log.Println("done Write")
+ return
+ default:
+ WriteOnce(r, ioutil.Discard)
+ }
+ }
+ }()
+ //*/
+
+ wg := &sync.WaitGroup{}
+ wg.Add(FANOUT)
+ for i := 0; i < FANOUT; i++ {
+ go func(i int) {
+ defer wg.Done()
+ //log.Println("go", i)
+ for i := 0; i < b.N; i++ {
+ c.Inc(1)
+ g.Update(int64(i))
+ gf.Update(float64(i))
+ h.Update(int64(i))
+ m.Mark(1)
+ t.Update(1)
+ }
+ //log.Println("done", i)
+ }(i)
+ }
+ wg.Wait()
+ close(ch)
+ wgD.Wait()
+ wgR.Wait()
+ wgW.Wait()
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go
new file mode 100644
index 00000000..fbc292de
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "time"
+ "os"
+ "strings"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ })
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes a OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := openTSDB(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+func getShortHostname() string {
+ if shortHostName == "" {
+ host, _ := os.Hostname()
+ if index := strings.Index(host, "."); index > 0 {
+ shortHostName = host[:index]
+ } else {
+ shortHostName = host
+ }
+ }
+ return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+ shortHostname := getShortHostname()
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ case Gauge:
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Min(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Max(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, du*t.Mean(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, du*t.StdDev(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[0], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[1], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[2], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[3], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[4], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go
new file mode 100644
index 00000000..6173d61a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/opentsdb_test.go
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+ "net"
+ "time"
+)
+
+func ExampleOpenTSDB() {
+ addr, _ := net.ResolveTCPAddr("net", ":2003")
+ go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr)
+}
+
+func ExampleOpenTSDBWithConfig() {
+ addr, _ := net.ResolveTCPAddr("net", ":2003")
+ go OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: DefaultRegistry,
+ FlushInterval: 1 * time.Second,
+ DurationUnit: time.Millisecond,
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go
new file mode 100644
index 00000000..9ef498a2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry.go
@@ -0,0 +1,168 @@
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists. If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+ return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+ // Call the given function for each registered metric.
+ Each(func(string, interface{}))
+
+ // Get the metric by the given name or nil if none is registered.
+ Get(string) interface{}
+
+ // Gets an existing metric or registers the given one.
+ // The interface can be the metric to register if not found in registry,
+ // or a function returning the metric for lazy instantiation.
+ GetOrRegister(string, interface{}) interface{}
+
+ // Register the given metric under the given name.
+ Register(string, interface{}) error
+
+ // Run all registered healthchecks.
+ RunHealthchecks()
+
+ // Unregister the metric with the given name.
+ Unregister(string)
+}
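+
+// A lazy-registration sketch (illustrative; "requests" is a hypothetical
+// name): passing the constructor rather than a constructed metric avoids
+// allocating one that may already exist.
+//
+//	c := r.GetOrRegister("requests", NewCounter).(Counter)
+//	c.Inc(1)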
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+ metrics map[string]interface{}
+ mutex sync.Mutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+ return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+ for name, i := range r.registered() {
+ f(name, i)
+ }
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if metric, ok := r.metrics[name]; ok {
+ return metric
+ }
+ if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+ i = v.Call(nil)[0].Interface()
+ }
+ r.register(name, i)
+ return i
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for _, i := range r.metrics {
+ if h, ok := i.(Healthcheck); ok {
+ h.Check()
+ }
+ }
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ delete(r.metrics, name)
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+ if _, ok := r.metrics[name]; ok {
+ return DuplicateMetric(name)
+ }
+ switch i.(type) {
+ case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+ r.metrics[name] = i
+ }
+ return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+ metrics := make(map[string]interface{}, len(r.metrics))
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for name, i := range r.metrics {
+ metrics[name] = i
+ }
+ return metrics
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+ DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+ return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+ return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+ return DefaultRegistry.Register(name, i)
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+ DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+ DefaultRegistry.Unregister(name)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go
new file mode 100644
index 00000000..9ba0a020
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/registry_test.go
@@ -0,0 +1,118 @@
+package metrics
+
+import "testing"
+
+func BenchmarkRegistry(b *testing.B) {
+ r := NewRegistry()
+ r.Register("foo", NewCounter())
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ r.Each(func(string, interface{}) {})
+ }
+}
+
+func TestRegistry(t *testing.T) {
+ r := NewRegistry()
+ r.Register("foo", NewCounter())
+ i := 0
+ r.Each(func(name string, iface interface{}) {
+ i++
+ if "foo" != name {
+ t.Fatal(name)
+ }
+ if _, ok := iface.(Counter); !ok {
+ t.Fatal(iface)
+ }
+ })
+ if 1 != i {
+ t.Fatal(i)
+ }
+ r.Unregister("foo")
+ i = 0
+ r.Each(func(string, interface{}) { i++ })
+ if 0 != i {
+ t.Fatal(i)
+ }
+}
+
+func TestRegistryDuplicate(t *testing.T) {
+ r := NewRegistry()
+ if err := r.Register("foo", NewCounter()); nil != err {
+ t.Fatal(err)
+ }
+ if err := r.Register("foo", NewGauge()); nil == err {
+ t.Fatal(err)
+ }
+ i := 0
+ r.Each(func(name string, iface interface{}) {
+ i++
+ if _, ok := iface.(Counter); !ok {
+ t.Fatal(iface)
+ }
+ })
+ if 1 != i {
+ t.Fatal(i)
+ }
+}
+
+func TestRegistryGet(t *testing.T) {
+ r := NewRegistry()
+ r.Register("foo", NewCounter())
+ if count := r.Get("foo").(Counter).Count(); 0 != count {
+ t.Fatal(count)
+ }
+ r.Get("foo").(Counter).Inc(1)
+ if count := r.Get("foo").(Counter).Count(); 1 != count {
+ t.Fatal(count)
+ }
+}
+
+func TestRegistryGetOrRegister(t *testing.T) {
+ r := NewRegistry()
+
+ // First metric wins with GetOrRegister
+ _ = r.GetOrRegister("foo", NewCounter())
+ m := r.GetOrRegister("foo", NewGauge())
+ if _, ok := m.(Counter); !ok {
+ t.Fatal(m)
+ }
+
+ i := 0
+ r.Each(func(name string, iface interface{}) {
+ i++
+ if name != "foo" {
+ t.Fatal(name)
+ }
+ if _, ok := iface.(Counter); !ok {
+ t.Fatal(iface)
+ }
+ })
+ if i != 1 {
+ t.Fatal(i)
+ }
+}
+
+func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
+ r := NewRegistry()
+
+ // First metric wins with GetOrRegister
+ _ = r.GetOrRegister("foo", NewCounter)
+ m := r.GetOrRegister("foo", NewGauge)
+ if _, ok := m.(Counter); !ok {
+ t.Fatal(m)
+ }
+
+ i := 0
+ r.Each(func(name string, iface interface{}) {
+ i++
+ if name != "foo" {
+ t.Fatal(name)
+ }
+ if _, ok := iface.(Counter); !ok {
+ t.Fatal(iface)
+ }
+ })
+ if i != 1 {
+ t.Fatal(i)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go
new file mode 100644
index 00000000..82574bf2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime.go
@@ -0,0 +1,200 @@
+package metrics
+
+import (
+ "runtime"
+ "time"
+)
+
+var (
+ memStats runtime.MemStats
+ runtimeMetrics struct {
+ MemStats struct {
+ Alloc Gauge
+ BuckHashSys Gauge
+ DebugGC Gauge
+ EnableGC Gauge
+ Frees Gauge
+ HeapAlloc Gauge
+ HeapIdle Gauge
+ HeapInuse Gauge
+ HeapObjects Gauge
+ HeapReleased Gauge
+ HeapSys Gauge
+ LastGC Gauge
+ Lookups Gauge
+ Mallocs Gauge
+ MCacheInuse Gauge
+ MCacheSys Gauge
+ MSpanInuse Gauge
+ MSpanSys Gauge
+ NextGC Gauge
+ NumGC Gauge
+ PauseNs Histogram
+ PauseTotalNs Gauge
+ StackInuse Gauge
+ StackSys Gauge
+ Sys Gauge
+ TotalAlloc Gauge
+ }
+ NumCgoCall Gauge
+ NumGoroutine Gauge
+ ReadMemStats Timer
+ }
+ frees uint64
+ lookups uint64
+ mallocs uint64
+ numGC uint32
+ numCgoCalls int64
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
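+
+// A wiring sketch (illustrative): register before capturing, since capturing
+// against a registry that has not been given to RegisterRuntimeMemStats
+// panics.
+//
+//	r := NewRegistry()
+//	RegisterRuntimeMemStats(r)
+//	go CaptureRuntimeMemStats(r, 5*time.Second)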
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called in a background
+// goroutine. Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+ t := time.Now()
+ runtime.ReadMemStats(&memStats) // This takes 50-200us.
+ runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+ runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+ runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+ if memStats.DebugGC {
+ runtimeMetrics.MemStats.DebugGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.DebugGC.Update(0)
+ }
+ if memStats.EnableGC {
+ runtimeMetrics.MemStats.EnableGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.EnableGC.Update(0)
+ }
+
+ runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+ runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+ runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+ runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+ runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+ runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+ runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+ runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+ runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+ runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+ runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+ runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+ runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+ runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+ runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+ runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+
+ // memStats.PauseNs is a circular buffer of the most recent GC pause
+ // durations; walk it so that only pauses recorded since the last capture
+ // are updated, replaying the whole buffer if more than len(PauseNs) GCs
+ // have happened in between.
+ i := numGC % uint32(len(memStats.PauseNs))
+ ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+ if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+ for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ } else {
+ if i > ii {
+ for ; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ i = 0
+ }
+ for ; i < ii; i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ }
+ frees = memStats.Frees
+ lookups = memStats.Lookups
+ mallocs = memStats.Mallocs
+ numGC = memStats.NumGC
+
+ runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+ runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+ runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+ runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+ runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+ currentNumCgoCalls := numCgoCall()
+ runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+ numCgoCalls = currentNumCgoCalls
+
+ runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats. The runtimeMetrics are named by their
+// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+ runtimeMetrics.MemStats.Alloc = NewGauge()
+ runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+ runtimeMetrics.MemStats.DebugGC = NewGauge()
+ runtimeMetrics.MemStats.EnableGC = NewGauge()
+ runtimeMetrics.MemStats.Frees = NewGauge()
+ runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+ runtimeMetrics.MemStats.HeapIdle = NewGauge()
+ runtimeMetrics.MemStats.HeapInuse = NewGauge()
+ runtimeMetrics.MemStats.HeapObjects = NewGauge()
+ runtimeMetrics.MemStats.HeapReleased = NewGauge()
+ runtimeMetrics.MemStats.HeapSys = NewGauge()
+ runtimeMetrics.MemStats.LastGC = NewGauge()
+ runtimeMetrics.MemStats.Lookups = NewGauge()
+ runtimeMetrics.MemStats.Mallocs = NewGauge()
+ runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+ runtimeMetrics.MemStats.MCacheSys = NewGauge()
+ runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+ runtimeMetrics.MemStats.MSpanSys = NewGauge()
+ runtimeMetrics.MemStats.NextGC = NewGauge()
+ runtimeMetrics.MemStats.NumGC = NewGauge()
+ runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+ runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+ runtimeMetrics.MemStats.StackInuse = NewGauge()
+ runtimeMetrics.MemStats.StackSys = NewGauge()
+ runtimeMetrics.MemStats.Sys = NewGauge()
+ runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+ runtimeMetrics.NumCgoCall = NewGauge()
+ runtimeMetrics.NumGoroutine = NewGauge()
+ runtimeMetrics.ReadMemStats = NewTimer()
+
+ r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+ r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+ r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+ r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+ r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+ r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+ r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+ r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+ r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+ r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+ r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+ r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+ r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+ r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+ r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+ r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+ r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+ r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+ r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+ r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+ r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+ r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+ r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+ r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+ r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+ r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+ r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+ r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+ r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go
new file mode 100644
index 00000000..38976a8c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_cgo.go
@@ -0,0 +1,9 @@
+// +build cgo
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+ return runtime.NumCgoCall()
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go
new file mode 100644
index 00000000..38220330
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo
+
+package metrics
+
+func numCgoCall() int64 {
+ return 0
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go
new file mode 100644
index 00000000..a0ca8947
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/runtime_test.go
@@ -0,0 +1,78 @@
+package metrics
+
+import (
+ "runtime"
+ "testing"
+ "time"
+)
+
+func BenchmarkRuntimeMemStats(b *testing.B) {
+ r := NewRegistry()
+ RegisterRuntimeMemStats(r)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
+
+func TestRuntimeMemStats(t *testing.T) {
+ r := NewRegistry()
+ RegisterRuntimeMemStats(r)
+ CaptureRuntimeMemStatsOnce(r)
+ zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
+ runtime.GC()
+ CaptureRuntimeMemStatsOnce(r)
+ if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
+ t.Fatal(count - zero)
+ }
+ runtime.GC()
+ runtime.GC()
+ CaptureRuntimeMemStatsOnce(r)
+ if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
+ t.Fatal(count - zero)
+ }
+ for i := 0; i < 256; i++ {
+ runtime.GC()
+ }
+ CaptureRuntimeMemStatsOnce(r)
+ if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
+ t.Fatal(count - zero)
+ }
+ for i := 0; i < 257; i++ {
+ runtime.GC()
+ }
+ CaptureRuntimeMemStatsOnce(r)
+ if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
+ t.Fatal(count - zero)
+ }
+}
+
+func TestRuntimeMemStatsBlocking(t *testing.T) {
+ if g := runtime.GOMAXPROCS(0); g < 2 {
+ t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
+ }
+ ch := make(chan int)
+ go testRuntimeMemStatsBlocking(ch)
+ var memStats runtime.MemStats
+ t0 := time.Now()
+ runtime.ReadMemStats(&memStats)
+ t1 := time.Now()
+ t.Log("i++ during runtime.ReadMemStats:", <-ch)
+ go testRuntimeMemStatsBlocking(ch)
+ d := t1.Sub(t0)
+ t.Log(d)
+ time.Sleep(d)
+ t.Log("i++ during time.Sleep:", <-ch)
+}
+
+func testRuntimeMemStatsBlocking(ch chan int) {
+ i := 0
+ for {
+ select {
+ case ch <- i:
+ return
+ default:
+ i++
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go
new file mode 100644
index 00000000..e34b7b58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample.go
@@ -0,0 +1,568 @@
+package metrics
+
+import (
+ "container/heap"
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Size() int
+ Snapshot() Sample
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Values() []int64
+ Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+type ExpDecaySample struct {
+ alpha float64
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ t0, t1 time.Time
+ values expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ s := &ExpDecaySample{
+ alpha: alpha,
+ reservoirSize: reservoirSize,
+ t0: time.Now(),
+ values: make(expDecaySampleHeap, 0, reservoirSize),
+ }
+ s.t1 = time.Now().Add(rescaleThreshold)
+ return s
+}
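+
+// A common pairing (as used for runtime.MemStats.PauseNs in this package):
+//
+//	h := NewHistogram(NewExpDecaySample(1028, 0.015))
+//	h.Update(17)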
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.t0 = time.Now()
+ s.t1 = s.t0.Add(rescaleThreshold)
+ s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+ return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+ return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+ return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+ return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ for i, v := range s.values {
+ values[i] = v.v
+ }
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+ return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+ return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+ s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ for i, v := range s.values {
+ values[i] = v.v
+ }
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+ return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp. This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if len(s.values) == s.reservoirSize {
+ heap.Pop(&s.values)
+ }
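+ // Forward decay: each value gets priority k = exp(alpha*age)/u for a uniform
+ // random u, and the Pop above evicts the smallest priority when the reservoir
+ // is full, so newer values are exponentially favored.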
+ heap.Push(&s.values, expDecaySample{
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ v: v,
+ })
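+ // Rescale once per rescaleThreshold: re-anchor the landmark t0 and scale each
+ // existing priority by exp(-alpha*dt) so the keys stay bounded while keeping
+ // their relative order.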
+ if t.After(s.t1) {
+ values := s.values
+ t0 := s.t0
+ s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
+ s.t0 = t
+ s.t1 = s.t0.Add(rescaleThreshold)
+ for _, v := range values {
+ v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) // seconds, matching the exponent used when the key was created
+ heap.Push(&s.values, v)
+ }
+ }
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var max int64 = math.MinInt64
+ for _, v := range values {
+ if max < v {
+ max = v
+ }
+ }
+ return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var min int64 = math.MaxInt64
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ }
+ return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+ return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+ scores := make([]float64, len(ps))
+ size := len(values)
+ if size > 0 {
+ sort.Sort(values)
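+ // pos is the 1-based fractional rank p*(size+1); ranks outside [1, size]
+ // clamp to the extreme values, otherwise linearly interpolate between the
+ // two neighboring order statistics.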
+ for i, p := range ps {
+ pos := p * float64(size+1)
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+ }
+ }
+ }
+ return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+ count int64
+ values []int64
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+ panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+ panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+ return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+ var sum int64
+ for _, v := range values {
+ sum += v
+ }
+ return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ m := SampleMean(values)
+ var sum float64
+ for _, v := range values {
+ d := float64(v) - m
+ sum += d * d
+ }
+ return sum / float64(len(values))
+}
+
+// UniformSample is a uniform sample using Vitter's Algorithm R.
+type UniformSample struct {
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ values []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ return &UniformSample{
+ reservoirSize: reservoirSize,
+ values: make([]int64, 0, reservoirSize),
+ }
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
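+ // Reservoir sampling: fill the reservoir first, then overwrite a uniformly
+ // random slot. (Strict Algorithm R would replace only with probability
+ // reservoirSize/count, so this variant is biased towards recent values.)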
+ if len(s.values) < s.reservoirSize {
+ s.values = append(s.values, v)
+ } else {
+ s.values[rand.Intn(s.reservoirSize)] = v
+ }
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+ k float64
+ v int64
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+type expDecaySampleHeap []expDecaySample
+
+func (q expDecaySampleHeap) Len() int {
+ return len(q)
+}
+
+func (q expDecaySampleHeap) Less(i, j int) bool {
+ return q[i].k < q[j].k
+}
+
+func (q *expDecaySampleHeap) Pop() interface{} {
+ q_ := *q
+ n := len(q_)
+ i := q_[n-1]
+ q_ = q_[0 : n-1]
+ *q = q_
+ return i
+}
+
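+// Push reslices within the backing array's spare capacity; it assumes the heap
+// never grows past the reservoir capacity it was created with.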
+func (q *expDecaySampleHeap) Push(x interface{}) {
+ q_ := *q
+ n := len(q_)
+ q_ = q_[0 : n+1]
+ q_[n] = x.(expDecaySample)
+ *q = q_
+}
+
+func (q expDecaySampleHeap) Swap(i, j int) {
+ q[i], q[j] = q[j], q[i]
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go
new file mode 100644
index 00000000..3cff3c09
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/sample_test.go
@@ -0,0 +1,352 @@
+package metrics
+
+import (
+ "math/rand"
+ "runtime"
+ "testing"
+ "time"
+)
+
+// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
+// expensive computations like Variance, the cost of copying the Sample, as
+// approximated by a make and copy, is much greater than the cost of the
+// computation for small samples and only slightly less for large samples.
+func BenchmarkCompute1000(b *testing.B) {
+ s := make([]int64, 1000)
+ for i := 0; i < len(s); i++ {
+ s[i] = int64(i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ SampleVariance(s)
+ }
+}
+func BenchmarkCompute1000000(b *testing.B) {
+ s := make([]int64, 1000000)
+ for i := 0; i < len(s); i++ {
+ s[i] = int64(i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ SampleVariance(s)
+ }
+}
+func BenchmarkCopy1000(b *testing.B) {
+ s := make([]int64, 1000)
+ for i := 0; i < len(s); i++ {
+ s[i] = int64(i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sCopy := make([]int64, len(s))
+ copy(sCopy, s)
+ }
+}
+func BenchmarkCopy1000000(b *testing.B) {
+ s := make([]int64, 1000000)
+ for i := 0; i < len(s); i++ {
+ s[i] = int64(i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ sCopy := make([]int64, len(s))
+ copy(sCopy, s)
+ }
+}
+
+func BenchmarkExpDecaySample257(b *testing.B) {
+ benchmarkSample(b, NewExpDecaySample(257, 0.015))
+}
+
+func BenchmarkExpDecaySample514(b *testing.B) {
+ benchmarkSample(b, NewExpDecaySample(514, 0.015))
+}
+
+func BenchmarkExpDecaySample1028(b *testing.B) {
+ benchmarkSample(b, NewExpDecaySample(1028, 0.015))
+}
+
+func BenchmarkUniformSample257(b *testing.B) {
+ benchmarkSample(b, NewUniformSample(257))
+}
+
+func BenchmarkUniformSample514(b *testing.B) {
+ benchmarkSample(b, NewUniformSample(514))
+}
+
+func BenchmarkUniformSample1028(b *testing.B) {
+ benchmarkSample(b, NewUniformSample(1028))
+}
+
+func TestExpDecaySample10(t *testing.T) {
+ rand.Seed(1)
+ s := NewExpDecaySample(100, 0.99)
+ for i := 0; i < 10; i++ {
+ s.Update(int64(i))
+ }
+ if size := s.Count(); 10 != size {
+ t.Errorf("s.Count(): 10 != %v\n", size)
+ }
+ if size := s.Size(); 10 != size {
+ t.Errorf("s.Size(): 10 != %v\n", size)
+ }
+ if l := len(s.Values()); 10 != l {
+ t.Errorf("len(s.Values()): 10 != %v\n", l)
+ }
+ for _, v := range s.Values() {
+ if v > 10 || v < 0 {
+ t.Errorf("out of range [0, 10): %v\n", v)
+ }
+ }
+}
+
+func TestExpDecaySample100(t *testing.T) {
+ rand.Seed(1)
+ s := NewExpDecaySample(1000, 0.01)
+ for i := 0; i < 100; i++ {
+ s.Update(int64(i))
+ }
+ if size := s.Count(); 100 != size {
+ t.Errorf("s.Count(): 100 != %v\n", size)
+ }
+ if size := s.Size(); 100 != size {
+ t.Errorf("s.Size(): 100 != %v\n", size)
+ }
+ if l := len(s.Values()); 100 != l {
+ t.Errorf("len(s.Values()): 100 != %v\n", l)
+ }
+ for _, v := range s.Values() {
+ if v > 100 || v < 0 {
+ t.Errorf("out of range [0, 100): %v\n", v)
+ }
+ }
+}
+
+func TestExpDecaySample1000(t *testing.T) {
+ rand.Seed(1)
+ s := NewExpDecaySample(100, 0.99)
+ for i := 0; i < 1000; i++ {
+ s.Update(int64(i))
+ }
+ if size := s.Count(); 1000 != size {
+ t.Errorf("s.Count(): 1000 != %v\n", size)
+ }
+ if size := s.Size(); 100 != size {
+ t.Errorf("s.Size(): 100 != %v\n", size)
+ }
+ if l := len(s.Values()); 100 != l {
+ t.Errorf("len(s.Values()): 100 != %v\n", l)
+ }
+ for _, v := range s.Values() {
+ if v > 1000 || v < 0 {
+ t.Errorf("out of range [0, 1000): %v\n", v)
+ }
+ }
+}
+
+// This test makes sure that the sample's priority is not amplified by using
+// the nanosecond duration since start rather than the second duration. If
+// nanoseconds were used, the priority would quickly become +Inf, effectively
+// freezing the set of samples until a rescale step happens.
+func TestExpDecaySampleNanosecondRegression(t *testing.T) {
+ rand.Seed(1)
+ s := NewExpDecaySample(100, 0.99)
+ for i := 0; i < 100; i++ {
+ s.Update(10)
+ }
+ time.Sleep(1 * time.Millisecond)
+ for i := 0; i < 100; i++ {
+ s.Update(20)
+ }
+ v := s.Values()
+ avg := float64(0)
+ for i := 0; i < len(v); i++ {
+ avg += float64(v[i])
+ }
+ avg /= float64(len(v))
+ if avg > 16 || avg < 14 {
+ t.Errorf("out of range [14, 16]: %v\n", avg)
+ }
+}
+
+func TestExpDecaySampleSnapshot(t *testing.T) {
+ now := time.Now()
+ rand.Seed(1)
+ s := NewExpDecaySample(100, 0.99)
+ for i := 1; i <= 10000; i++ {
+ s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+ }
+ snapshot := s.Snapshot()
+ s.Update(1)
+ testExpDecaySampleStatistics(t, snapshot)
+}
+
+func TestExpDecaySampleStatistics(t *testing.T) {
+ now := time.Now()
+ rand.Seed(1)
+ s := NewExpDecaySample(100, 0.99)
+ for i := 1; i <= 10000; i++ {
+ s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
+ }
+ testExpDecaySampleStatistics(t, s)
+}
+
+func TestUniformSample(t *testing.T) {
+ rand.Seed(1)
+ s := NewUniformSample(100)
+ for i := 0; i < 1000; i++ {
+ s.Update(int64(i))
+ }
+ if size := s.Count(); 1000 != size {
+ t.Errorf("s.Count(): 1000 != %v\n", size)
+ }
+ if size := s.Size(); 100 != size {
+ t.Errorf("s.Size(): 100 != %v\n", size)
+ }
+ if l := len(s.Values()); 100 != l {
+ t.Errorf("len(s.Values()): 100 != %v\n", l)
+ }
+ for _, v := range s.Values() {
+ if v > 1000 || v < 0 {
+ t.Errorf("out of range [0, 100): %v\n", v)
+ }
+ }
+}
+
+func TestUniformSampleIncludesTail(t *testing.T) {
+ rand.Seed(1)
+ s := NewUniformSample(100)
+ max := 100
+ for i := 0; i < max; i++ {
+ s.Update(int64(i))
+ }
+ v := s.Values()
+ sum := 0
+ exp := (max - 1) * max / 2
+ for i := 0; i < len(v); i++ {
+ sum += int(v[i])
+ }
+ if exp != sum {
+ t.Errorf("sum: %v != %v\n", exp, sum)
+ }
+}
+
+func TestUniformSampleSnapshot(t *testing.T) {
+ s := NewUniformSample(100)
+ for i := 1; i <= 10000; i++ {
+ s.Update(int64(i))
+ }
+ snapshot := s.Snapshot()
+ s.Update(1)
+ testUniformSampleStatistics(t, snapshot)
+}
+
+func TestUniformSampleStatistics(t *testing.T) {
+ rand.Seed(1)
+ s := NewUniformSample(100)
+ for i := 1; i <= 10000; i++ {
+ s.Update(int64(i))
+ }
+ testUniformSampleStatistics(t, s)
+}
+
+func benchmarkSample(b *testing.B, s Sample) {
+ var memStats runtime.MemStats
+ runtime.ReadMemStats(&memStats)
+ pauseTotalNs := memStats.PauseTotalNs
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ s.Update(1)
+ }
+ b.StopTimer()
+ runtime.GC()
+ runtime.ReadMemStats(&memStats)
+ b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
+}
+
+func testExpDecaySampleStatistics(t *testing.T, s Sample) {
+ if count := s.Count(); 10000 != count {
+ t.Errorf("s.Count(): 10000 != %v\n", count)
+ }
+ if min := s.Min(); 107 != min {
+ t.Errorf("s.Min(): 107 != %v\n", min)
+ }
+ if max := s.Max(); 10000 != max {
+ t.Errorf("s.Max(): 10000 != %v\n", max)
+ }
+ if mean := s.Mean(); 4965.98 != mean {
+ t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
+ }
+ if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
+ t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
+ }
+ ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+ if 4615 != ps[0] {
+ t.Errorf("median: 4615 != %v\n", ps[0])
+ }
+ if 7672 != ps[1] {
+ t.Errorf("75th percentile: 7672 != %v\n", ps[1])
+ }
+ if 9998.99 != ps[2] {
+ t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
+ }
+}
+
+func testUniformSampleStatistics(t *testing.T, s Sample) {
+ if count := s.Count(); 10000 != count {
+ t.Errorf("s.Count(): 10000 != %v\n", count)
+ }
+ if min := s.Min(); 9412 != min {
+ t.Errorf("s.Min(): 9412 != %v\n", min)
+ }
+ if max := s.Max(); 10000 != max {
+ t.Errorf("s.Max(): 10000 != %v\n", max)
+ }
+ if mean := s.Mean(); 9902.26 != mean {
+ t.Errorf("s.Mean(): 9902.26 != %v\n", mean)
+ }
+ if stdDev := s.StdDev(); 101.8667384380201 != stdDev {
+ t.Errorf("s.StdDev(): 101.8667384380201 != %v\n", stdDev)
+ }
+ ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
+ if 9930.5 != ps[0] {
+ t.Errorf("median: 9930.5 != %v\n", ps[0])
+ }
+ if 9973.75 != ps[1] {
+ t.Errorf("75th percentile: 9973.75 != %v\n", ps[1])
+ }
+ if 9999.99 != ps[2] {
+ t.Errorf("99th percentile: 9999.99 != %v\n", ps[2])
+ }
+}
+
+// TestUniformSampleConcurrentUpdateCount would expose data race problems with
+// concurrent Update and Count calls on Sample when the test is run with the
+// -race flag.
+func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ s := NewUniformSample(100)
+ for i := 0; i < 100; i++ {
+ s.Update(int64(i))
+ }
+ quit := make(chan struct{})
+ go func() {
+ t := time.NewTicker(10 * time.Millisecond)
+ for {
+ select {
+ case <-t.C:
+ s.Update(rand.Int63())
+ case <-quit:
+ t.Stop()
+ return
+ }
+ }
+ }()
+ for i := 0; i < 1000; i++ {
+ s.Count()
+ time.Sleep(5 * time.Millisecond)
+ }
+ quit <- struct{}{}
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go
new file mode 100644
index 00000000..0afcb484
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/stathat/stathat.go
@@ -0,0 +1,69 @@
+// Package stathat provides metrics output to StatHat.
+package stathat
+
+import (
+ "github.com/rcrowley/go-metrics"
+ "github.com/stathat/go"
+ "log"
+ "time"
+)
+
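+// Stathat posts every metric in the given registry to StatHat once per
+// interval d, using the EZ API keyed by userkey. It never returns, so run it
+// in its own goroutine.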
+func Stathat(r metrics.Registry, d time.Duration, userkey string) {
+ for {
+ if err := sh(r, userkey); nil != err {
+ log.Println(err)
+ }
+ time.Sleep(d)
+ }
+}
+
+func sh(r metrics.Registry, userkey string) error {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case metrics.Counter:
+ stathat.PostEZCount(name, userkey, int(metric.Count()))
+ case metrics.Gauge:
+ stathat.PostEZValue(name, userkey, float64(metric.Value()))
+ case metrics.GaugeFloat64:
+ stathat.PostEZValue(name, userkey, float64(metric.Value()))
+ case metrics.Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ stathat.PostEZCount(name+".count", userkey, int(h.Count()))
+ stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
+ stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
+ stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
+ stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
+ stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
+ stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
+ stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
+ stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
+ stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
+ case metrics.Meter:
+ m := metric.Snapshot()
+ stathat.PostEZCount(name+".count", userkey, int(m.Count()))
+ stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
+ stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
+ stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
+ stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
+ case metrics.Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ stathat.PostEZCount(name+".count", userkey, int(t.Count()))
+ stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
+ stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
+ stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
+ stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
+ stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
+ stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
+ stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
+ stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
+ stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
+ stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
+ stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
+ stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
+ stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
+ }
+ })
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go
new file mode 100644
index 00000000..693f1908
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+ "fmt"
+ "log/syslog"
+ "time"
+)
+
+// Syslog outputs each metric in the given registry to syslog periodically
+// using the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+ for _ = range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case Gauge:
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ case GaugeFloat64:
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ case Healthcheck:
+ metric.Check()
+ w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+ name,
+ h.Count(),
+ h.Min(),
+ h.Max(),
+ h.Mean(),
+ h.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ ))
+ case Meter:
+ m := metric.Snapshot()
+ w.Info(fmt.Sprintf(
+ "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+ name,
+ m.Count(),
+ m.Rate1(),
+ m.Rate5(),
+ m.Rate15(),
+ m.RateMean(),
+ ))
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+ name,
+ t.Count(),
+ t.Min(),
+ t.Max(),
+ t.Mean(),
+ t.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ t.Rate1(),
+ t.Rate5(),
+ t.Rate15(),
+ t.RateMean(),
+ ))
+ }
+ })
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go
new file mode 100644
index 00000000..73f19b58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer.go
@@ -0,0 +1,299 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Timer
+ StdDev() float64
+ Time(func())
+ Update(time.Duration)
+ UpdateSince(time.Time)
+ Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: h,
+ meter: m,
+ }
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+func NewRegisteredTimer(name string, r Registry) Timer {
+ c := NewTimer()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+func NewTimer() Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+ meter: NewMeter(),
+ }
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+ h Histogram
+ m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+ histogram Histogram
+ meter Meter
+ mutex sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+ return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+ return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+ return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+ return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+ return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+ return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+ return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+ return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return &TimerSnapshot{
+ histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+ meter: t.meter.Snapshot().(*MeterSnapshot),
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+ return t.histogram.StdDev()
+}
+
+// Time records the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+ ts := time.Now()
+ f()
+ t.Update(time.Since(ts))
+}
+
+// Update records the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(d))
+ t.meter.Mark(1)
+}
+
+// UpdateSince records the duration of an event that started at the given time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(time.Since(ts)))
+ t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+ return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+ histogram *HistogramSnapshot
+ meter *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+ panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+ panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+ panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go
new file mode 100644
index 00000000..2fa415d4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/timer_test.go
@@ -0,0 +1,81 @@
+package metrics
+
+import (
+ "math"
+ "testing"
+ "time"
+)
+
+func BenchmarkTimer(b *testing.B) {
+ tm := NewTimer()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ tm.Update(1)
+ }
+}
+
+func TestGetOrRegisterTimer(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredTimer("foo", r).Update(47)
+ if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
+ t.Fatal(tm)
+ }
+}
+
+func TestTimerExtremes(t *testing.T) {
+ tm := NewTimer()
+ tm.Update(math.MaxInt64)
+ tm.Update(0)
+ if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
+ t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
+ }
+}
+
+func TestTimerFunc(t *testing.T) {
+ tm := NewTimer()
+ tm.Time(func() { time.Sleep(50e6) })
+ if max := tm.Max(); 45e6 > max || max > 55e6 {
+ t.Errorf("tm.Max(): 45e6 > %v || %v > 55e6\n", max, max)
+ }
+}
+
+func TestTimerZero(t *testing.T) {
+ tm := NewTimer()
+ if count := tm.Count(); 0 != count {
+ t.Errorf("tm.Count(): 0 != %v\n", count)
+ }
+ if min := tm.Min(); 0 != min {
+ t.Errorf("tm.Min(): 0 != %v\n", min)
+ }
+ if max := tm.Max(); 0 != max {
+ t.Errorf("tm.Max(): 0 != %v\n", max)
+ }
+ if mean := tm.Mean(); 0.0 != mean {
+ t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
+ }
+ if stdDev := tm.StdDev(); 0.0 != stdDev {
+ t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
+ }
+ ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
+ if 0.0 != ps[0] {
+ t.Errorf("median: 0.0 != %v\n", ps[0])
+ }
+ if 0.0 != ps[1] {
+ t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
+ }
+ if 0.0 != ps[2] {
+ t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
+ }
+ if rate1 := tm.Rate1(); 0.0 != rate1 {
+ t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
+ }
+ if rate5 := tm.Rate5(); 0.0 != rate5 {
+ t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
+ }
+ if rate15 := tm.Rate15(); 0.0 != rate15 {
+ t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
+ }
+ if rateMean := tm.RateMean(); 0.0 != rateMean {
+ t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go
new file mode 100644
index 00000000..091e971d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteOnce(r, w)
+ }
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+ var namedMetrics namedMetricSlice
+ r.Each(func(name string, i interface{}) {
+ namedMetrics = append(namedMetrics, namedMetric{name, i})
+ })
+
+ sort.Sort(namedMetrics)
+ for _, namedMetric := range namedMetrics {
+ switch metric := namedMetric.m.(type) {
+ case Counter:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ case Gauge:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+ fmt.Fprintf(w, " error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", h.Count())
+ fmt.Fprintf(w, " min: %9d\n", h.Min())
+ fmt.Fprintf(w, " max: %9d\n", h.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", m.Count())
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", t.Count())
+ fmt.Fprintf(w, " min: %9d\n", t.Min())
+ fmt.Fprintf(w, " max: %9d\n", t.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
+ }
+ }
+}
+
+type namedMetric struct {
+ name string
+ m interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+ return nms[i].name < nms[j].name
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go
new file mode 100644
index 00000000..1aacc287
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/go-metrics/writer_test.go
@@ -0,0 +1,22 @@
+package metrics
+
+import (
+ "sort"
+ "testing"
+)
+
+func TestMetricsSorting(t *testing.T) {
+ var namedMetrics = namedMetricSlice{
+ {name: "zzz"},
+ {name: "bbb"},
+ {name: "fff"},
+ {name: "ggg"},
+ }
+
+ sort.Sort(namedMetrics)
+ for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
+ if namedMetrics[i].name != name {
+ t.Fail()
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore
new file mode 100644
index 00000000..ca502e29
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.gitignore
@@ -0,0 +1,4 @@
+*.nut
+*.swp
+examples/example1
+examples/example_web
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml
new file mode 100644
index 00000000..4f2ee4d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE
new file mode 100644
index 00000000..01a9a5c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md
new file mode 100644
index 00000000..61068a82
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/README.md
@@ -0,0 +1,119 @@
+# GoRelic
+
+New Relic agent for the Go runtime. It collects a lot of metrics about the scheduler, garbage collector and memory allocator and
+sends them to NewRelic.
+
+### Requirements
+- Go 1.1 or higher
+- github.com/yvasiyarov/gorelic
+- github.com/yvasiyarov/newrelic_platform_go
+- github.com/yvasiyarov/go-metrics
+
+Only the first two dependencies have to be installed manually. All other dependencies will be installed automatically
+by the Go toolchain.
+
+### Installation
+```bash
+go get github.com/yvasiyarov/gorelic
+```
+and add the following code to the initialization part of your application:
+```go
+import (
+ "github.com/yvasiyarov/gorelic"
+)
+....
+
+agent := gorelic.NewAgent()
+agent.Verbose = true
+agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
+agent.Run()
+
+```
+
+### Middleware
+If you are using the Beego, Martini, Revel or Gin framework, you can hook gorelic up with your application by using the following middleware:
+- https://github.com/yvasiyarov/beego_gorelic
+- https://github.com/yvasiyarov/martini_gorelic
+- https://github.com/yvasiyarov/gocraft_gorelic
+- http://wiki.colar.net/revel_newelic
+- https://github.com/jingweno/negroni-gorelic
+- https://github.com/brandfolder/gin-gorelic
+
+
+### Configuration
+- NewrelicLicense - it's the only mandatory setting of this agent.
+- NewrelicName - component name in the NewRelic dashboard. Default value: "Go daemon"
+- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds
+- Verbose - print some information useful for debugging. Default value: false
+- CollectGcStat - whether the agent should collect garbage collector statistics. Default value: true
+- CollectHTTPStat - whether the agent should collect HTTP metrics. Default value: false
+- CollectMemoryStat - whether the agent should collect memory allocator statistics. Default value: true
+- GCPollInterval - how often GC statistics should be collected. Default value: 10 seconds. It has a performance impact. For more information, please see the metrics documentation.
+- MemoryAllocatorPollInterval - how often memory allocator statistics should be collected. Default value: 60 seconds. It has a performance impact. For more information, please read the metrics documentation.
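+
+A configuration sketch (the field names are defined in agent.go of this revision; the service name and interval values below are only examples, and the standard log package is assumed to be imported):
+
+```go
+agent := gorelic.NewAgent()
+agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY HERE"
+agent.NewrelicName = "My Go service"
+agent.NewrelicPollInterval = 120 // report every two minutes instead of every 60 seconds
+agent.CollectHTTPStat = true     // also collect HTTP metrics
+if err := agent.Run(); err != nil {
+    log.Fatal(err)
+}
+```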
+
+
+## Metrics reported by plugin
+This agent uses functions exposed by the runtime and runtime/debug packages to collect the most important information about the Go runtime.
+
+### General metrics
+- Runtime/General/NOGoroutines - number of running goroutines, as reported by NumGoroutine() from the runtime package
+- Runtime/General/NOCgoCalls - number of cgo calls made, as reported by NumCgoCall() from the runtime package
+
+### Garbage collector metrics
+- Runtime/GC/NumberOfGCCalls - Number of GC calls, as reported by ReadGCStats() from runtime/debug
+- Runtime/GC/PauseTotalTime - Total pause time during GC calls, as reported by ReadGCStats() from runtime/debug (in nanoseconds)
+- Runtime/GC/GCTime/Max - max GC time
+- Runtime/GC/GCTime/Min - min GC time
+- Runtime/GC/GCTime/Mean - GC mean time
+- Runtime/GC/GCTime/Percentile95 - 95% percentile of GC time
+
+All these metrics are measured in nanoseconds. The last four of them can be inaccurate if GC is called more often than once per GCPollInterval.
+If GC is called more often in your workload, you can consider decreasing the value of GCPollInterval.
+But be careful: ReadGCStats() locks the mheap, so it is not a good idea to set GCPollInterval to very low values.
+
+### Memory allocator
+- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from the OS in total.
+- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from the OS for stacks.
+- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from the OS for internal MSpan structs.
+- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from the OS for internal MCache structs.
+- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from the OS for the heap.
+- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from the OS for internal BuckHash structs.
+- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute
+- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute
+- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute
+- Component/Runtime/Memory/InUse/Total - total amount of memory in use
+- Component/Runtime/Memory/InUse/Heap - amount of memory in use for heap
+- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures
+- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures
+- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks
+
+### Process metrics
+- Component/Runtime/System/Threads - number of OS threads used
+- Runtime/System/FDSize - number of file descriptors, used by process
+- Runtime/System/Memory/VmPeakSize - VM max size
+- Runtime/System/Memory/VmCurrent - VM current size
+- Runtime/System/Memory/RssPeak - max size of resident memory set
+- Runtime/System/Memory/RssCurrent - current size of resident memory set
+
+All these metrics are collected once per MemoryAllocatorPollInterval. To collect these statistics the agent uses the ReadMemStats() routine.
+This routine calls stoptheworld() internally and blocks everything else. So, please, consider this when you change the MemoryAllocatorPollInterval value.
+
+### HTTP metrics
+- throughput (requests per second), calculated for last minute
+- mean throughput (requests per second)
+- mean response time
+- min response time
+- max response time
+- 75%, 90%, 95% percentiles for response time
+
+
+In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc:
+
+```go
+http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler))
+```
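+
+The agent also exposes a WrapHTTPHandler method (see agent.go) for instrumenting an http.Handler value directly; a minimal sketch:
+
+```go
+mux := http.NewServeMux()
+mux.HandleFunc("/", handler)
+http.ListenAndServe(":8080", agent.WrapHTTPHandler(mux))
+```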
+
+## TODO
+- Collect per-size allocation statistic
+- Collect user defined metrics
+
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go
new file mode 100644
index 00000000..660623d6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/agent.go
@@ -0,0 +1,137 @@
+package gorelic
+
+import (
+ "errors"
+ "fmt"
+ metrics "github.com/yvasiyarov/go-metrics"
+ "github.com/yvasiyarov/newrelic_platform_go"
+ "log"
+ "net/http"
+)
+
+const (
+ // DefaultNewRelicPollInterval - how often we will report metrics to NewRelic.
+ // The recommended value is 60 seconds
+ DefaultNewRelicPollInterval = 60
+
+ // DefaultGcPollIntervalInSeconds - how often we will collect garbage collector run statistics.
+ // Default value: every 10 seconds.
+ // During GC stat polling the mheap is locked, so be careful changing this value.
+ DefaultGcPollIntervalInSeconds = 10
+
+ // DefaultMemoryAllocatorPollIntervalInSeconds - how often we will collect memory allocator statistics.
+ // Default value: every 60 seconds.
+ // During this process stoptheworld() is called, so be careful changing this value.
+ DefaultMemoryAllocatorPollIntervalInSeconds = 60
+
+ //DefaultAgentGuid is the plugin ID in NewRelic.
+ //You should not change it unless you want to create your own plugin.
+ DefaultAgentGuid = "com.github.yvasiyarov.GoRelic"
+
+ //CurrentAgentVersion is the plugin version
+ CurrentAgentVersion = "0.0.6"
+
+ //DefaultAgentName is the component name shown in the NewRelic GUI. You can change it.
+ DefaultAgentName = "Go daemon"
+)
+
+//Agent is the NewRelic agent implementation.
+//Agent starts a separate goroutine which will report data to NewRelic
+type Agent struct {
+ NewrelicName string
+ NewrelicLicense string
+ NewrelicPollInterval int
+ Verbose bool
+ CollectGcStat bool
+ CollectMemoryStat bool
+ CollectHTTPStat bool
+ GCPollInterval int
+ MemoryAllocatorPollInterval int
+ AgentGUID string
+ AgentVersion string
+ plugin *newrelic_platform_go.NewrelicPlugin
+ HTTPTimer metrics.Timer
+}
+
+//NewAgent builds a new Agent object.
+func NewAgent() *Agent {
+ agent := &Agent{
+ NewrelicName: DefaultAgentName,
+ NewrelicPollInterval: DefaultNewRelicPollInterval,
+ Verbose: false,
+ CollectGcStat: true,
+ CollectMemoryStat: true,
+ GCPollInterval: DefaultGcPollIntervalInSeconds,
+ MemoryAllocatorPollInterval: DefaultMemoryAllocatorPollIntervalInSeconds,
+ AgentGUID: DefaultAgentGuid,
+ AgentVersion: CurrentAgentVersion,
+ }
+ return agent
+}
+
+//WrapHTTPHandlerFunc instruments HTTP handler functions to collect HTTP metrics
+func (agent *Agent) WrapHTTPHandlerFunc(h tHTTPHandlerFunc) tHTTPHandlerFunc {
+ agent.initTimer()
+ return func(w http.ResponseWriter, req *http.Request) {
+ proxy := newHTTPHandlerFunc(h)
+ proxy.timer = agent.HTTPTimer
+ proxy.ServeHTTP(w, req)
+ }
+}
+
+//WrapHTTPHandler instruments an HTTP handler object to collect HTTP metrics
+func (agent *Agent) WrapHTTPHandler(h http.Handler) http.Handler {
+ agent.initTimer()
+
+ proxy := newHTTPHandler(h)
+ proxy.timer = agent.HTTPTimer
+ return proxy
+}
+
+//Run initializes the Agent instance and starts the harvest goroutine
+func (agent *Agent) Run() error {
+ if agent.NewrelicLicense == "" {
+ return errors.New("please, pass a valid newrelic license key")
+ }
+
+ agent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval)
+ component := newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID)
+ agent.plugin.AddComponent(component)
+
+ addRuntimeMericsToComponent(component)
+
+ if agent.CollectGcStat {
+ addGCMericsToComponent(component, agent.GCPollInterval)
+ agent.debug(fmt.Sprintf("Init GC metrics collection. Poll interval %d seconds.", agent.GCPollInterval))
+ }
+ if agent.CollectMemoryStat {
+ addMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval)
+ agent.debug(fmt.Sprintf("Init memory allocator metrics collection. Poll interval %d seconds.", agent.MemoryAllocatorPollInterval))
+ }
+
+ if agent.CollectHTTPStat {
+ agent.initTimer()
+ addHTTPMericsToComponent(component, agent.HTTPTimer)
+ agent.debug(fmt.Sprintf("Init HTTP metrics collection."))
+ }
+
+ agent.plugin.Verbose = agent.Verbose
+ go agent.plugin.Run()
+ return nil
+}
+
+//initTimer initializes the global metrics.Timer object used to collect HTTP metrics
+func (agent *Agent) initTimer() {
+ if agent.HTTPTimer == nil {
+ agent.HTTPTimer = metrics.NewTimer()
+ }
+
+ agent.CollectHTTPStat = true
+}
+
+//Print debug messages
+func (agent *Agent) debug(msg string) {
+ if agent.Verbose {
+ log.Println(msg)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go
new file mode 100644
index 00000000..69de9fee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/doc.go
@@ -0,0 +1,2 @@
+// Package gorelic is a New Relic agent implementation for the Go runtime. It collects a lot of metrics about the Go scheduler, garbage collector and memory allocator and sends them to NewRelic.
+package gorelic
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go
new file mode 100644
index 00000000..dc6c0e34
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example1.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+ "flag"
+ "github.com/yvasiyarov/gorelic"
+ "log"
+ "math/rand"
+ "runtime"
+ "time"
+)
+
+var newrelicLicense = flag.String("newrelic-license", "", "Newrelic license")
+
+func allocateAndSum(arraySize int) int {
+ arr := make([]int, arraySize, arraySize)
+ for i := range arr {
+ arr[i] = rand.Int()
+ }
+ time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond)
+
+ result := 0
+ for _, v := range arr {
+ result += v
+ }
+ //log.Printf("Array size is: %d, sum is: %d\n", arraySize, result)
+ return result
+}
+
+func doSomeJob(numRoutines int) {
+ for {
+ for i := 0; i < numRoutines; i++ {
+ go allocateAndSum(rand.Intn(1024) * 1024)
+ }
+ log.Printf("All %d routines started\n", numRoutines)
+ time.Sleep(1000 * time.Millisecond)
+ runtime.GC()
+ }
+}
+
+func main() {
+
+ flag.Parse()
+ if *newrelicLicense == "" {
+ log.Fatalf("Please, pass a valid newrelic license key.\n Use --help to get more information about available options\n")
+ }
+ agent := gorelic.NewAgent()
+ agent.Verbose = true
+ agent.NewrelicLicense = *newrelicLicense
+ agent.Run()
+
+ doSomeJob(100)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go
new file mode 100644
index 00000000..aae0ef7e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/examples/example_web.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "expvar"
+ "flag"
+ "github.com/yvasiyarov/gorelic"
+ "io"
+ "log"
+ "math/rand"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+var newrelicLicense = flag.String("newrelic-license", "", "Newrelic license")
+
+var numCalls = expvar.NewInt("num_calls")
+
+func allocateAndSum(arraySize int) int {
+	arr := make([]int, arraySize)
+ for i := range arr {
+ arr[i] = rand.Int()
+ }
+ time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond)
+
+ result := 0
+ for _, v := range arr {
+ result += v
+ }
+ //log.Printf("Array size is: %d, sum is: %d\n", arraySize, result)
+ return result
+}
+
+func doSomeJob(numRoutines int) {
+ for i := 0; i < numRoutines; i++ {
+ go allocateAndSum(rand.Intn(1024) * 1024)
+ }
+ log.Printf("All %d routines started\n", numRoutines)
+ time.Sleep(1000 * time.Millisecond)
+ runtime.GC()
+}
+
+func helloServer(w http.ResponseWriter, req *http.Request) {
+
+ doSomeJob(5)
+ io.WriteString(w, "Did some work")
+}
+
+func main() {
+ flag.Parse()
+ if *newrelicLicense == "" {
+		log.Fatalf("Please pass a valid newrelic license key.\nUse --help to get more information about available options\n")
+ }
+ agent := gorelic.NewAgent()
+ agent.Verbose = true
+ agent.CollectHTTPStat = true
+ agent.NewrelicLicense = *newrelicLicense
+ agent.Run()
+
+ http.HandleFunc("/", agent.WrapHTTPHandlerFunc(helloServer))
+ http.ListenAndServe(":8080", nil)
+
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go
new file mode 100644
index 00000000..39405940
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gc_metrics.go
@@ -0,0 +1,65 @@
+package gorelic
+
+import (
+ metrics "github.com/yvasiyarov/go-metrics"
+ "github.com/yvasiyarov/newrelic_platform_go"
+ "time"
+)
+
+func newGCMetricaDataSource(pollInterval int) goMetricaDataSource {
+ r := metrics.NewRegistry()
+
+ metrics.RegisterDebugGCStats(r)
+ go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second)
+ return goMetricaDataSource{r}
+}
+
+func addGCMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) {
+	gcMetrics := []*baseGoMetrica{
+ &baseGoMetrica{
+ name: "NumberOfGCCalls",
+ units: "calls",
+ dataSourceKey: "debug.GCStats.NumGC",
+ },
+ &baseGoMetrica{
+ name: "PauseTotalTime",
+ units: "nanoseconds",
+ dataSourceKey: "debug.GCStats.PauseTotal",
+ },
+ }
+
+ ds := newGCMetricaDataSource(pollInterval)
+	for _, m := range gcMetrics {
+ m.basePath = "Runtime/GC/"
+ m.dataSource = ds
+ component.AddMetrica(&gaugeMetrica{m})
+ }
+
+ histogramMetrics := []*histogramMetrica{
+ &histogramMetrica{
+ statFunction: histogramMax,
+ baseGoMetrica: &baseGoMetrica{name: "Max"},
+ },
+ &histogramMetrica{
+ statFunction: histogramMin,
+ baseGoMetrica: &baseGoMetrica{name: "Min"},
+ },
+ &histogramMetrica{
+ statFunction: histogramMean,
+ baseGoMetrica: &baseGoMetrica{name: "Mean"},
+ },
+ &histogramMetrica{
+ statFunction: histogramPercentile,
+ percentileValue: 0.95,
+ baseGoMetrica: &baseGoMetrica{name: "Percentile95"},
+ },
+ }
+ for _, m := range histogramMetrics {
+ m.baseGoMetrica.units = "nanoseconds"
+ m.baseGoMetrica.dataSourceKey = "debug.GCStats.Pause"
+ m.baseGoMetrica.basePath = "Runtime/GC/GCTime/"
+ m.baseGoMetrica.dataSource = ds
+
+ component.AddMetrica(m)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go
new file mode 100644
index 00000000..52fcdd57
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/gometrica.go
@@ -0,0 +1,105 @@
+package gorelic
+
+import (
+ "fmt"
+ metrics "github.com/yvasiyarov/go-metrics"
+)
+
+const (
+ histogramMin = iota
+ histogramMax
+ histogramMean
+ histogramPercentile
+ histogramStdDev
+ histogramVariance
+ noHistogramFunctions
+)
+
+type goMetricaDataSource struct {
+ metrics.Registry
+}
+
+func (ds goMetricaDataSource) GetGaugeValue(key string) (float64, error) {
+ if valueContainer := ds.Get(key); valueContainer == nil {
+ return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
+ } else if gauge, ok := valueContainer.(metrics.Gauge); ok {
+ return float64(gauge.Value()), nil
+ } else {
+ return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer)
+ }
+}
+
+func (ds goMetricaDataSource) GetHistogramValue(key string, statFunction int, percentile float64) (float64, error) {
+ if valueContainer := ds.Get(key); valueContainer == nil {
+ return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
+ } else if histogram, ok := valueContainer.(metrics.Histogram); ok {
+ switch statFunction {
+ default:
+			return 0, fmt.Errorf("unsupported stat function for histogram: %d\n", statFunction)
+ case histogramMax:
+ return float64(histogram.Max()), nil
+ case histogramMin:
+ return float64(histogram.Min()), nil
+ case histogramMean:
+ return float64(histogram.Mean()), nil
+ case histogramStdDev:
+ return float64(histogram.StdDev()), nil
+ case histogramVariance:
+ return float64(histogram.Variance()), nil
+ case histogramPercentile:
+ return float64(histogram.Percentile(percentile)), nil
+ }
+ } else {
+ return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer)
+ }
+}
+
+type baseGoMetrica struct {
+ dataSource goMetricaDataSource
+ basePath string
+ name string
+ units string
+ dataSourceKey string
+}
+
+func (metrica *baseGoMetrica) GetName() string {
+ return metrica.basePath + metrica.name
+}
+
+func (metrica *baseGoMetrica) GetUnits() string {
+ return metrica.units
+}
+
+type gaugeMetrica struct {
+ *baseGoMetrica
+}
+
+func (metrica *gaugeMetrica) GetValue() (float64, error) {
+ return metrica.dataSource.GetGaugeValue(metrica.dataSourceKey)
+}
+
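+// gaugeIncMetrica reports how much a monotonically growing gauge increased since the previous poll.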
+type gaugeIncMetrica struct {
+ *baseGoMetrica
+ previousValue float64
+}
+
+func (metrica *gaugeIncMetrica) GetValue() (float64, error) {
+ var value float64
+ var currentValue float64
+ var err error
+ if currentValue, err = metrica.dataSource.GetGaugeValue(metrica.dataSourceKey); err == nil {
+ value = currentValue - metrica.previousValue
+ metrica.previousValue = currentValue
+ }
+ return value, err
+}
+
+type histogramMetrica struct {
+ *baseGoMetrica
+ statFunction int
+ percentileValue float64
+}
+
+func (metrica *histogramMetrica) GetValue() (float64, error) {
+ return metrica.dataSource.GetHistogramValue(metrica.dataSourceKey, metrica.statFunction, metrica.percentileValue)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go
new file mode 100644
index 00000000..e54cbd37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/http_metrics.go
@@ -0,0 +1,197 @@
+package gorelic
+
+import (
+ metrics "github.com/yvasiyarov/go-metrics"
+ "github.com/yvasiyarov/newrelic_platform_go"
+ "net/http"
+ "time"
+)
+
+type tHTTPHandlerFunc func(http.ResponseWriter, *http.Request)
+type tHTTPHandler struct {
+ originalHandler http.Handler
+ originalHandlerFunc tHTTPHandlerFunc
+ isFunc bool
+ timer metrics.Timer
+}
+
+var httpTimer metrics.Timer
+
+func newHTTPHandlerFunc(h tHTTPHandlerFunc) *tHTTPHandler {
+ return &tHTTPHandler{
+ isFunc: true,
+ originalHandlerFunc: h,
+ }
+}
+func newHTTPHandler(h http.Handler) *tHTTPHandler {
+ return &tHTTPHandler{
+ isFunc: false,
+ originalHandler: h,
+ }
+}
+
+func (handler *tHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ startTime := time.Now()
+ defer handler.timer.UpdateSince(startTime)
+
+ if handler.isFunc {
+ handler.originalHandlerFunc(w, req)
+ } else {
+ handler.originalHandler.ServeHTTP(w, req)
+ }
+}
+
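+// The metricas below expose statistics from the shared request timer. go-metrics
+// timers record durations in nanoseconds, so time-based values are converted to
+// milliseconds before being reported.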
+type baseTimerMetrica struct {
+ dataSource metrics.Timer
+ name string
+ units string
+}
+
+func (metrica *baseTimerMetrica) GetName() string {
+ return metrica.name
+}
+
+func (metrica *baseTimerMetrica) GetUnits() string {
+ return metrica.units
+}
+
+type timerRate1Metrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerRate1Metrica) GetValue() (float64, error) {
+ return metrica.dataSource.Rate1(), nil
+}
+
+type timerRateMeanMetrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerRateMeanMetrica) GetValue() (float64, error) {
+ return metrica.dataSource.RateMean(), nil
+}
+
+type timerMeanMetrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerMeanMetrica) GetValue() (float64, error) {
+ return metrica.dataSource.Mean() / float64(time.Millisecond), nil
+}
+
+type timerMinMetrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerMinMetrica) GetValue() (float64, error) {
+ return float64(metrica.dataSource.Min()) / float64(time.Millisecond), nil
+}
+
+type timerMaxMetrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerMaxMetrica) GetValue() (float64, error) {
+ return float64(metrica.dataSource.Max()) / float64(time.Millisecond), nil
+}
+
+type timerPercentile75Metrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerPercentile75Metrica) GetValue() (float64, error) {
+ return metrica.dataSource.Percentile(0.75) / float64(time.Millisecond), nil
+}
+
+type timerPercentile90Metrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerPercentile90Metrica) GetValue() (float64, error) {
+ return metrica.dataSource.Percentile(0.90) / float64(time.Millisecond), nil
+}
+
+type timerPercentile95Metrica struct {
+ *baseTimerMetrica
+}
+
+func (metrica *timerPercentile95Metrica) GetValue() (float64, error) {
+ return metrica.dataSource.Percentile(0.95) / float64(time.Millisecond), nil
+}
+
+func addHTTPMericsToComponent(component newrelic_platform_go.IComponent, timer metrics.Timer) {
+ rate1 := &timerRate1Metrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/throughput/1minute",
+ units: "rps",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(rate1)
+
+ rateMean := &timerRateMeanMetrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/throughput/rateMean",
+ units: "rps",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(rateMean)
+
+ responseTimeMean := &timerMeanMetrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/mean",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimeMean)
+
+ responseTimeMax := &timerMaxMetrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/max",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimeMax)
+
+ responseTimeMin := &timerMinMetrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/min",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimeMin)
+
+ responseTimePercentile75 := &timerPercentile75Metrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/percentile75",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimePercentile75)
+
+ responseTimePercentile90 := &timerPercentile90Metrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/percentile90",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimePercentile90)
+
+ responseTimePercentile95 := &timerPercentile95Metrica{
+ baseTimerMetrica: &baseTimerMetrica{
+ name: "http/responseTime/percentile95",
+ units: "ms",
+ dataSource: timer,
+ },
+ }
+ component.AddMetrica(responseTimePercentile95)
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go
new file mode 100644
index 00000000..5c8d3e4e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/memory_metrics.go
@@ -0,0 +1,110 @@
+package gorelic
+
+import (
+ metrics "github.com/yvasiyarov/go-metrics"
+ "github.com/yvasiyarov/newrelic_platform_go"
+ "time"
+)
+
+func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource {
+ r := metrics.NewRegistry()
+
+ metrics.RegisterRuntimeMemStats(r)
+ metrics.CaptureRuntimeMemStatsOnce(r)
+ go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second)
+ return goMetricaDataSource{r}
+}
+
+func addMemoryMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) {
+ gaugeMetrics := []*baseGoMetrica{
+ //Memory in use metrics
+ &baseGoMetrica{
+ name: "InUse/Total",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.Alloc",
+ },
+ &baseGoMetrica{
+ name: "InUse/Heap",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.HeapAlloc",
+ },
+ &baseGoMetrica{
+ name: "InUse/Stack",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.StackInuse",
+ },
+ &baseGoMetrica{
+ name: "InUse/MSpanInuse",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.MSpanInuse",
+ },
+ &baseGoMetrica{
+ name: "InUse/MCacheInuse",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.MCacheInuse",
+ },
+ }
+ ds := newMemoryMetricaDataSource(pollInterval)
+ for _, m := range gaugeMetrics {
+ m.basePath = "Runtime/Memory/"
+ m.dataSource = ds
+ component.AddMetrica(&gaugeMetrica{m})
+ }
+
+ gaugeIncMetrics := []*baseGoMetrica{
+		//Number-of-operations (NO) counters
+ &baseGoMetrica{
+ name: "Operations/NoPointerLookups",
+ units: "lookups",
+ dataSourceKey: "runtime.MemStats.Lookups",
+ },
+ &baseGoMetrica{
+ name: "Operations/NoMallocs",
+ units: "mallocs",
+ dataSourceKey: "runtime.MemStats.Mallocs",
+ },
+ &baseGoMetrica{
+ name: "Operations/NoFrees",
+ units: "frees",
+ dataSourceKey: "runtime.MemStats.Frees",
+ },
+
+		// System memory allocations
+ &baseGoMetrica{
+ name: "SysMem/Total",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.Sys",
+ },
+ &baseGoMetrica{
+ name: "SysMem/Heap",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.HeapSys",
+ },
+ &baseGoMetrica{
+ name: "SysMem/Stack",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.StackSys",
+ },
+ &baseGoMetrica{
+ name: "SysMem/MSpan",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.MSpanSys",
+ },
+ &baseGoMetrica{
+ name: "SysMem/MCache",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.MCacheSys",
+ },
+ &baseGoMetrica{
+ name: "SysMem/BuckHash",
+ units: "bytes",
+ dataSourceKey: "runtime.MemStats.BuckHashSys",
+ },
+ }
+
+ for _, m := range gaugeIncMetrics {
+ m.basePath = "Runtime/Memory/"
+ m.dataSource = ds
+ component.AddMetrica(&gaugeIncMetrica{baseGoMetrica: m})
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json
new file mode 100644
index 00000000..7abb8ec6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/nut.json
@@ -0,0 +1,15 @@
+{
+ "Version": "0.0.6",
+ "Vendor": "yvasiyarov",
+ "Authors": [
+ {
+ "FullName": "Yuriy Vasiyarov",
+ "Email": "varyous@gmail.com"
+ }
+ ],
+ "ExtraFiles": [
+ "README.md",
+ "LICENSE"
+ ],
+ "Homepage": "https://github.com/yvasiyarov/gorelic"
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go
new file mode 100644
index 00000000..87a42ca6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/gorelic/runtime_metrics.go
@@ -0,0 +1,198 @@
+package gorelic
+
+import (
+ "fmt"
+ "github.com/yvasiyarov/newrelic_platform_go"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const linuxSystemQueryInterval = 60
+
+// Number of goroutines metrica
+type noGoroutinesMetrica struct{}
+
+func (metrica *noGoroutinesMetrica) GetName() string {
+ return "Runtime/General/NOGoroutines"
+}
+func (metrica *noGoroutinesMetrica) GetUnits() string {
+ return "goroutines"
+}
+func (metrica *noGoroutinesMetrica) GetValue() (float64, error) {
+ return float64(runtime.NumGoroutine()), nil
+}
+
+// Number of CGO calls metrica
+type noCgoCallsMetrica struct {
+ lastValue int64
+}
+
+func (metrica *noCgoCallsMetrica) GetName() string {
+ return "Runtime/General/NOCgoCalls"
+}
+func (metrica *noCgoCallsMetrica) GetUnits() string {
+ return "calls"
+}
+func (metrica *noCgoCallsMetrica) GetValue() (float64, error) {
+ currentValue := runtime.NumCgoCall()
+ value := float64(currentValue - metrica.lastValue)
+ metrica.lastValue = currentValue
+
+ return value, nil
+}
+
+//OS specific metrics data source interface
+type iSystemMetricaDataSource interface {
+ GetValue(key string) (float64, error)
+}
+
+// newSystemMetricaDataSource is a factory for iSystemMetricaDataSource implementations
+func newSystemMetricaDataSource() iSystemMetricaDataSource {
+ var ds iSystemMetricaDataSource
+ switch runtime.GOOS {
+ default:
+ ds = &systemMetricaDataSource{}
+ case "linux":
+ ds = &linuxSystemMetricaDataSource{
+ systemData: make(map[string]string),
+ }
+ }
+ return ds
+}
+
+//Default implementation of iSystemMetricaDataSource. It just returns an error
+type systemMetricaDataSource struct{}
+
+func (ds *systemMetricaDataSource) GetValue(key string) (float64, error) {
+	return 0, fmt.Errorf("this metrica is not yet implemented for %s", runtime.GOOS)
+}
+
+// Linux implementation of iSystemMetricaDataSource
+type linuxSystemMetricaDataSource struct {
+ lastUpdate time.Time
+ systemData map[string]string
+}
+
+func (ds *linuxSystemMetricaDataSource) GetValue(key string) (float64, error) {
+ if err := ds.checkAndUpdateData(); err != nil {
+ return 0, err
+ } else if val, ok := ds.systemData[key]; !ok {
+ return 0, fmt.Errorf("system data with key %s was not found", key)
+ } else if key == "VmSize" || key == "VmPeak" || key == "VmHWM" || key == "VmRSS" {
+ valueParts := strings.Split(val, " ")
+ if len(valueParts) != 2 {
+ return 0, fmt.Errorf("invalid format for value %s", key)
+ }
+ valConverted, err := strconv.ParseFloat(valueParts[0], 64)
+ if err != nil {
+ return 0, err
+ }
+ switch valueParts[1] {
+ case "kB":
+ valConverted *= 1 << 10
+ case "mB":
+ valConverted *= 1 << 20
+ case "gB":
+ valConverted *= 1 << 30
+ }
+ return valConverted, nil
+	} else if valConverted, err := strconv.ParseFloat(val, 64); err != nil {
+		return 0, err
+	} else {
+		return valConverted, nil
+	}
+}
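+// checkAndUpdateData re-reads /proc/<pid>/status at most once per
+// linuxSystemQueryInterval seconds and caches the parsed key/value pairs.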
+func (ds *linuxSystemMetricaDataSource) checkAndUpdateData() error {
+ startTime := time.Now()
+ if startTime.Sub(ds.lastUpdate) > time.Second*linuxSystemQueryInterval {
+ path := fmt.Sprintf("/proc/%d/status", os.Getpid())
+ rawStats, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ lines := strings.Split(string(rawStats), "\n")
+ for _, line := range lines {
+ parts := strings.Split(line, ":")
+ if len(parts) == 2 {
+ k := strings.TrimSpace(parts[0])
+ v := strings.TrimSpace(parts[1])
+
+ ds.systemData[k] = v
+ }
+ }
+ ds.lastUpdate = startTime
+ }
+ return nil
+}
+
+// OS specific metrica
+type systemMetrica struct {
+ sourceKey string
+ newrelicName string
+ units string
+ dataSource iSystemMetricaDataSource
+}
+
+func (metrica *systemMetrica) GetName() string {
+ return metrica.newrelicName
+}
+func (metrica *systemMetrica) GetUnits() string {
+ return metrica.units
+}
+func (metrica *systemMetrica) GetValue() (float64, error) {
+ return metrica.dataSource.GetValue(metrica.sourceKey)
+}
+
+func addRuntimeMericsToComponent(component newrelic_platform_go.IComponent) {
+ component.AddMetrica(&noGoroutinesMetrica{})
+ component.AddMetrica(&noCgoCallsMetrica{})
+
+ ds := newSystemMetricaDataSource()
+ metrics := []*systemMetrica{
+ &systemMetrica{
+ sourceKey: "Threads",
+ units: "Threads",
+ newrelicName: "Runtime/System/Threads",
+ },
+ &systemMetrica{
+ sourceKey: "FDSize",
+ units: "fd",
+ newrelicName: "Runtime/System/FDSize",
+ },
+ // Peak virtual memory size
+ &systemMetrica{
+ sourceKey: "VmPeak",
+ units: "bytes",
+ newrelicName: "Runtime/System/Memory/VmPeakSize",
+ },
+ //Virtual memory size
+ &systemMetrica{
+ sourceKey: "VmSize",
+ units: "bytes",
+ newrelicName: "Runtime/System/Memory/VmCurrent",
+ },
+ //Peak resident set size
+ &systemMetrica{
+ sourceKey: "VmHWM",
+ units: "bytes",
+ newrelicName: "Runtime/System/Memory/RssPeak",
+ },
+ //Resident set size
+ &systemMetrica{
+ sourceKey: "VmRSS",
+ units: "bytes",
+ newrelicName: "Runtime/System/Memory/RssCurrent",
+ },
+ }
+ for _, m := range metrics {
+ m.dataSource = ds
+ component.AddMetrica(m)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml
new file mode 100644
index 00000000..4f2ee4d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE
new file mode 100644
index 00000000..01a9a5c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md
new file mode 100644
index 00000000..34462344
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/README.md
@@ -0,0 +1,36 @@
+New Relic Platform Agent SDK for Go (golang)
+====================
+
+[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go)
+
+This package provides a very simple interface to the New Relic Platform http://newrelic.com/platform
+
+For an example of usage, see examples/wave_plugin.go
+
+For a real-world example, have a look at:
+https://github.com/yvasiyarov/newrelic_sphinx
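+
+A minimal usage sketch (condensed from examples/wave_plugin.go; the metrica type,
+license key and component GUID below are placeholders):
+
+```go
+package main
+
+import "github.com/yvasiyarov/newrelic_platform_go"
+
+// constMetrica is a trivial IMetrica implementation reporting a constant value.
+type constMetrica struct{}
+
+func (m *constMetrica) GetName() string            { return "Custom/Constant" }
+func (m *constMetrica) GetUnits() string           { return "units" }
+func (m *constMetrica) GetValue() (float64, error) { return 1, nil }
+
+func main() {
+	plugin := newrelic_platform_go.NewNewrelicPlugin("0.0.1", "YOUR_LICENSE_KEY", 60)
+	component := newrelic_platform_go.NewPluginComponent("Example component", "com.example.plugin.demo")
+	plugin.AddComponent(component)
+	component.AddMetrica(&constMetrica{})
+	plugin.Verbose = true
+	plugin.Run()
+}
+```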
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go
new file mode 100644
index 00000000..d9d27535
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/agent.go
@@ -0,0 +1,27 @@
+package newrelic_platform_go
+
+import (
+ "log"
+ "os"
+)
+
+type Agent struct {
+ Host string `json:"host"`
+ Version string `json:"version"`
+ Pid int `json:"pid"`
+}
+
+func NewAgent(version string) *Agent {
+ agent := &Agent{
+		Version: version,
+ }
+ return agent
+}
+
+func (agent *Agent) CollectEnvironmentInfo() {
+ var err error
+ agent.Pid = os.Getpid()
+ if agent.Host, err = os.Hostname(); err != nil {
+		log.Fatalf("Cannot get hostname: %v\n", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go
new file mode 100644
index 00000000..000f7ab7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/component.go
@@ -0,0 +1,71 @@
+package newrelic_platform_go
+
+import (
+ "log"
+ "math"
+)
+
+type ComponentData interface{}
+type IComponent interface {
+ Harvest(plugin INewrelicPlugin) ComponentData
+ SetDuration(duration int)
+ AddMetrica(model IMetrica)
+ ClearSentData()
+}
+
+type PluginComponent struct {
+ Name string `json:"name"`
+ GUID string `json:"guid"`
+ Duration int `json:"duration"`
+ Metrics map[string]MetricaValue `json:"metrics"`
+ MetricaModels []IMetrica `json:"-"`
+}
+
+func NewPluginComponent(name string, guid string) *PluginComponent {
+ c := &PluginComponent{
+ Name: name,
+ GUID: guid,
+ }
+ return c
+}
+
+func (component *PluginComponent) AddMetrica(model IMetrica) {
+ component.MetricaModels = append(component.MetricaModels, model)
+}
+
+func (component *PluginComponent) ClearSentData() {
+ component.Metrics = nil
+}
+
+func (component *PluginComponent) SetDuration(duration int) {
+ component.Duration = duration
+}
+
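+// Harvest polls every registered metrica once and fills the Metrics map that is
+// serialized to JSON. When two metricas map to the same key, their values are
+// folded into an AggregatedMetricaValue (min/max/total/count/sum of squares).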
+func (component *PluginComponent) Harvest(plugin INewrelicPlugin) ComponentData {
+ component.Metrics = make(map[string]MetricaValue, len(component.MetricaModels))
+ for i := 0; i < len(component.MetricaModels); i++ {
+ model := component.MetricaModels[i]
+ metricaKey := plugin.GetMetricaKey(model)
+
+ if newValue, err := model.GetValue(); err == nil {
+ if math.IsInf(newValue, 0) || math.IsNaN(newValue) {
+ newValue = 0
+ }
+
+ if existMetric, ok := component.Metrics[metricaKey]; ok {
+ if floatExistVal, ok := existMetric.(float64); ok {
+ component.Metrics[metricaKey] = NewAggregatedMetricaValue(floatExistVal, newValue)
+ } else if aggregatedValue, ok := existMetric.(*AggregatedMetricaValue); ok {
+ aggregatedValue.Aggregate(newValue)
+ } else {
+ panic("Invalid type in metrica value")
+ }
+ } else {
+ component.Metrics[metricaKey] = newValue
+ }
+ } else {
+			log.Printf("Cannot get metrica %v, got error: %v", model.GetName(), err)
+ }
+ }
+ return component
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go
new file mode 100644
index 00000000..ef41e969
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/doc.go
@@ -0,0 +1,2 @@
+// Package newrelic_platform_go is the New Relic Platform Agent SDK for the Go language.
+package newrelic_platform_go
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go
new file mode 100644
index 00000000..57f3cf87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/examples/wave_plugin.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+ "github.com/yvasiyarov/newrelic_platform_go"
+)
+
+type WaveMetrica struct {
+ sawtoothMax int
+ sawtoothCounter int
+}
+
+func (metrica *WaveMetrica) GetName() string {
+ return "Wave_Metrica"
+}
+func (metrica *WaveMetrica) GetUnits() string {
+ return "Queries/Second"
+}
+func (metrica *WaveMetrica) GetValue() (float64, error) {
+ metrica.sawtoothCounter++
+ if metrica.sawtoothCounter > metrica.sawtoothMax {
+ metrica.sawtoothCounter = 0
+ }
+ return float64(metrica.sawtoothCounter), nil
+}
+
+type SquareWaveMetrica struct {
+ squarewaveMax int
+ squarewaveCounter int
+}
+
+func (metrica *SquareWaveMetrica) GetName() string {
+ return "SquareWave_Metrica"
+}
+func (metrica *SquareWaveMetrica) GetUnits() string {
+ return "Queries/Second"
+}
+func (metrica *SquareWaveMetrica) GetValue() (float64, error) {
+ returnValue := 0
+ metrica.squarewaveCounter++
+
+ if metrica.squarewaveCounter < (metrica.squarewaveMax / 2) {
+ returnValue = 0
+ } else {
+ returnValue = metrica.squarewaveMax
+ }
+
+ if metrica.squarewaveCounter > metrica.squarewaveMax {
+ metrica.squarewaveCounter = 0
+ }
+ return float64(returnValue), nil
+}
+
+func main() {
+ plugin := newrelic_platform_go.NewNewrelicPlugin("0.0.1", "7bceac019c7dcafae1ef95be3e3a3ff8866de246", 60)
+	component := newrelic_platform_go.NewPluginComponent("Wave component", "com.example.plugin.gowave")
+ plugin.AddComponent(component)
+
+ m := &WaveMetrica{
+ sawtoothMax: 10,
+ sawtoothCounter: 5,
+ }
+ component.AddMetrica(m)
+
+ m1 := &SquareWaveMetrica{
+ squarewaveMax: 4,
+ squarewaveCounter: 1,
+ }
+ component.AddMetrica(m1)
+
+ plugin.Verbose = true
+ plugin.Run()
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go
new file mode 100644
index 00000000..fc4fbd48
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/metrica.go
@@ -0,0 +1,42 @@
+package newrelic_platform_go
+
+import (
+ "math"
+)
+
+type IMetrica interface {
+ GetValue() (float64, error)
+ GetName() string
+ GetUnits() string
+}
+
+type MetricaValue interface{}
+
+type SimpleMetricaValue float64
+
+type AggregatedMetricaValue struct {
+ Min float64 `json:"min"`
+ Max float64 `json:"max"`
+ Total float64 `json:"total"`
+ Count int `json:"count"`
+ SumOfSquares float64 `json:"sum_of_squares"`
+}
+
+func NewAggregatedMetricaValue(existValue float64, newValue float64) *AggregatedMetricaValue {
+ v := &AggregatedMetricaValue{
+ Min: math.Min(newValue, existValue),
+ Max: math.Max(newValue, existValue),
+ Total: newValue + existValue,
+ Count: 2,
+ SumOfSquares: newValue*newValue + existValue*existValue,
+ }
+ return v
+}
+
+func (aggregatedValue *AggregatedMetricaValue) Aggregate(newValue float64) {
+ aggregatedValue.Min = math.Min(newValue, aggregatedValue.Min)
+ aggregatedValue.Max = math.Max(newValue, aggregatedValue.Max)
+ aggregatedValue.Total += newValue
+ aggregatedValue.Count++
+ aggregatedValue.SumOfSquares += newValue * newValue
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json
new file mode 100644
index 00000000..1e57c395
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/nut.json
@@ -0,0 +1,15 @@
+{
+ "Version": "0.0.1",
+ "Vendor": "yvasiyarov",
+ "Authors": [
+ {
+ "FullName": "Yuriy Vasiyarov",
+ "Email": "varyous@gmail.com"
+ }
+ ],
+ "ExtraFiles": [
+ "README.md",
+ "LICENSE"
+ ],
+ "Homepage": "https://github.com/yvasiyarov/newrelic_platform_go"
+}
diff --git a/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go
new file mode 100644
index 00000000..3e45666d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/yvasiyarov/newrelic_platform_go/plugin.go
@@ -0,0 +1,190 @@
+package newrelic_platform_go
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "strings"
+ "time"
+)
+
+const (
+ NEWRELIC_API_URL = "https://platform-api.newrelic.com/platform/v1/metrics"
+)
+
+type INewrelicPlugin interface {
+ GetMetricaKey(metrica IMetrica) string
+ Harvest() error
+ Run()
+ AddComponent(component IComponent)
+}
+type NewrelicPlugin struct {
+ Agent *Agent `json:"agent"`
+ Components []ComponentData `json:"components"`
+
+ ComponentModels []IComponent `json:"-"`
+ LastPollTime time.Time `json:"-"`
+ Verbose bool `json:"-"`
+ LicenseKey string `json:"-"`
+ PollIntervalInSecond int `json:"-"`
+}
+
+func NewNewrelicPlugin(version string, licenseKey string, pollInterval int) *NewrelicPlugin {
+ plugin := &NewrelicPlugin{
+ LicenseKey: licenseKey,
+ PollIntervalInSecond: pollInterval,
+ }
+
+ plugin.Agent = NewAgent(version)
+ plugin.Agent.CollectEnvironmentInfo()
+
+ plugin.ComponentModels = []IComponent{}
+ return plugin
+}
+
+func (plugin *NewrelicPlugin) Harvest() error {
+ startTime := time.Now()
+ var duration int
+ if plugin.LastPollTime.IsZero() {
+ duration = plugin.PollIntervalInSecond
+ } else {
+ duration = int(startTime.Sub(plugin.LastPollTime).Seconds())
+ }
+
+ plugin.Components = make([]ComponentData, 0, len(plugin.ComponentModels))
+ for i := 0; i < len(plugin.ComponentModels); i++ {
+ plugin.ComponentModels[i].SetDuration(duration)
+ plugin.Components = append(plugin.Components, plugin.ComponentModels[i].Harvest(plugin))
+ }
+
+ if httpCode, err := plugin.SendMetricas(); err != nil {
+		log.Printf("Cannot send metricas to newrelic: %v\n", err)
+ return err
+ } else {
+
+ if plugin.Verbose {
+ log.Printf("Got HTTP response code:%d", httpCode)
+ }
+
+ if err, isFatal := plugin.CheckResponse(httpCode); isFatal {
+ log.Printf("Got fatal error:%v\n", err)
+ return err
+ } else {
+ if err != nil {
+ log.Printf("WARNING: %v", err)
+ }
+ return err
+ }
+ }
+}
+
+func (plugin *NewrelicPlugin) GetMetricaKey(metrica IMetrica) string {
+ var keyBuffer bytes.Buffer
+
+ keyBuffer.WriteString("Component/")
+ keyBuffer.WriteString(metrica.GetName())
+ keyBuffer.WriteString("[")
+ keyBuffer.WriteString(metrica.GetUnits())
+ keyBuffer.WriteString("]")
+
+ return keyBuffer.String()
+}
+
+func (plugin *NewrelicPlugin) SendMetricas() (int, error) {
+ client := &http.Client{}
+ var metricasJson []byte
+ var encodingError error
+
+ if plugin.Verbose {
+ metricasJson, encodingError = json.MarshalIndent(plugin, "", " ")
+ } else {
+ metricasJson, encodingError = json.Marshal(plugin)
+ }
+
+ if encodingError != nil {
+ return 0, encodingError
+ }
+
+ jsonAsString := string(metricasJson)
+ if plugin.Verbose {
+ log.Printf("Send data:%s \n", jsonAsString)
+ }
+
+ if httpRequest, err := http.NewRequest("POST", NEWRELIC_API_URL, strings.NewReader(jsonAsString)); err != nil {
+ return 0, err
+ } else {
+ httpRequest.Header.Set("X-License-Key", plugin.LicenseKey)
+ httpRequest.Header.Set("Content-Type", "application/json")
+ httpRequest.Header.Set("Accept", "application/json")
+
+ if httpResponse, err := client.Do(httpRequest); err != nil {
+ return 0, err
+ } else {
+ defer httpResponse.Body.Close()
+ return httpResponse.StatusCode, nil
+ }
+ }
+}
+
+func (plugin *NewrelicPlugin) ClearSentData() {
+ for _, component := range plugin.ComponentModels {
+ component.ClearSentData()
+ }
+ plugin.Components = nil
+ plugin.LastPollTime = time.Now()
+}
+
+func (plugin *NewrelicPlugin) CheckResponse(httpResponseCode int) (error, bool) {
+ isFatal := false
+ var err error
+ switch httpResponseCode {
+ case http.StatusOK:
+ {
+ plugin.ClearSentData()
+ }
+ case http.StatusForbidden:
+ {
+ err = fmt.Errorf("Authentication error (no license key header, or invalid license key).\n")
+ isFatal = true
+ }
+ case http.StatusBadRequest:
+ {
+ err = fmt.Errorf("The request or headers are in the wrong format or the URL is incorrect.\n")
+ isFatal = true
+ }
+ case http.StatusNotFound:
+ {
+ err = fmt.Errorf("Invalid URL\n")
+ isFatal = true
+ }
+ case http.StatusRequestEntityTooLarge:
+ {
+ err = fmt.Errorf("Too many metrics were sent in one request, or too many components (instances) were specified in one request, or other single-request limits were reached.\n")
+ //discard metrics
+ plugin.ClearSentData()
+ }
+ case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout:
+ {
+			err = fmt.Errorf("Got %v response code. Metricas will be aggregated", httpResponseCode)
+ }
+ }
+ return err, isFatal
+}
+
+func (plugin *NewrelicPlugin) Run() {
+ plugin.Harvest()
+ tickerChannel := time.Tick(time.Duration(plugin.PollIntervalInSecond) * time.Second)
+ for ts := range tickerChannel {
+ plugin.Harvest()
+
+ if plugin.Verbose {
+ log.Printf("Harvest ended at:%v\n", ts)
+ }
+ }
+}
+
+func (plugin *NewrelicPlugin) AddComponent(component IComponent) {
+ plugin.ComponentModels = append(plugin.ComponentModels, component)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE
new file mode 100644
index 00000000..a68e67f0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE
@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE.libyaml
new file mode 100644
index 00000000..8da58fbf
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/README.md b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/README.md
new file mode 100644
index 00000000..d6c919e6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/README.md
@@ -0,0 +1,128 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct{C int; D []int ",flow"}
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/apic.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/apic.go
new file mode 100644
index 00000000..95ec014e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
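+
+// Illustrative sketch (not part of the original libyaml port): these
+// initializers are paired with yaml_emitter_emit, defined in emitterc.go.
+// Given an initialized yaml_emitter_t named emitter, encoding a document
+// such as "a: 1" feeds the emitter roughly this event sequence (the style
+// constants are assumed to come from yamlh.go):
+//
+//	var e yaml_event_t
+//	yaml_stream_start_event_initialize(&e, yaml_UTF8_ENCODING)
+//	yaml_emitter_emit(&emitter, &e)
+//	yaml_document_start_event_initialize(&e, nil, nil, true)
+//	yaml_emitter_emit(&emitter, &e)
+//	yaml_mapping_start_event_initialize(&e, nil, nil, true, yaml_BLOCK_MAPPING_STYLE)
+//	yaml_emitter_emit(&emitter, &e)
+//	yaml_scalar_event_initialize(&e, nil, nil, []byte("a"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_emitter_emit(&emitter, &e)
+//	yaml_scalar_event_initialize(&e, nil, nil, []byte("1"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//	yaml_emitter_emit(&emitter, &e)
+//
+// followed by the matching MAPPING-END, DOCUMENT-END, and STREAM-END events.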
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode.go
new file mode 100644
index 00000000..c7647eef
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode.go
@@ -0,0 +1,651 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+ panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
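+
+// Illustrative sketch (not part of the upstream source): for the input
+// "a:\n- 1\n- 2\n", the parser above produces a tree shaped like
+//
+//	documentNode
+//	└── mappingNode
+//	    ├── scalarNode "a"
+//	    └── sequenceNode
+//	        ├── scalarNode "1"
+//	        └── scalarNode "2"
+//
+// with any anchors recorded in doc.anchors as they are encountered.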
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+ d := &decoder{mapType: defaultMapType}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
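+
+// Illustrative note (not part of the upstream source): snapshotting
+// d.terrors around the inner unmarshal is what lets a custom unmarshaler
+// probe one type and retry with another when it sees a *TypeError, e.g.
+// (a sketch with a hypothetical sliceOrInt type; decode_test.go exercises
+// the same idea with sliceUnmarshaler):
+//
+//	func (v *sliceOrInt) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var s []int
+//		if err := unmarshal(&s); err == nil {
+//			*v = s
+//			return nil
+//		}
+//		var i int
+//		if err := unmarshal(&i); err != nil {
+//			return err
+//		}
+//		*v = []int{i}
+//		return nil
+//	}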
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it succeeded without type errors.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case float64:
+ if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
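+
+// Illustrative note (not part of the upstream source): the scalar decoder
+// above is what makes the cross-type conversions exercised by the tests
+// work. For example, given
+//
+//	var v struct {
+//		A int
+//		B float64
+//		C string
+//		D time.Duration
+//	}
+//	yaml.Unmarshal([]byte("a: 1\nb: 1\nc: 1\nd: 3s"), &v)
+//
+// the scalar "1" resolves to an int and is accepted by the int, float64,
+// and string fields alike, while "3s" is handed to time.ParseDuration in
+// the reflect.Int case because D's type is time.Duration.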
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ // okay
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, 0))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ l := len(n.children)
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Set(reflect.Append(out, e))
+ }
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
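+
+// Illustrative note (not part of the upstream source): d.mapType is saved
+// and restored here (and in mappingSlice below) so that nested untyped
+// maps inherit the container's flavour: decoding into yaml.MapSlice makes
+// inner maps come out as MapSlice too (see the "Ordered maps" case in
+// decode_test.go), while a plain interface{} target falls back to
+// map[interface{}]interface{}.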
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
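+
+// Illustrative note (not part of the upstream source): iterating the merge
+// sequence backwards means later unmarshals overwrite earlier ones, so the
+// first map listed takes precedence, as the YAML merge spec requires:
+//
+//	base: &base {a: 1, b: 2}
+//	other: &other {b: 20, c: 30}
+//	merged:
+//	  <<: [*base, *other]
+//
+// decodes "merged" as {a: 1, b: 2, c: 30}: *other is applied first, then
+// *base overwrites the duplicated key "b". The merge tests in
+// decode_test.go cover the same behaviour.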
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode_test.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode_test.go
new file mode 100644
index 00000000..ae688960
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/decode_test.go
@@ -0,0 +1,825 @@
+package yaml_test
+
+import (
+ "errors"
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "math"
+ "net"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ &struct{}{},
+ }, {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]string{"foo": ""},
+ }, {
+ "foo: null",
+ map[string]interface{}{"foo": nil},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+ // Struct inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // bug 1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+
+ // Issue #24.
+ {
+ "a: ",
+ map[string]string{"a": ""},
+ },
+
+ // Base 60 floats are obsolete and unsupported.
+ {
+ "a: 1:1\n",
+ map[string]string{"a": "1:1"},
+ },
+
+ // Binary data.
+ {
+ "a: !!binary gIGC\n",
+ map[string]string{"a": "\x80\x81\x82"},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
+ map[string]string{"a": strings.Repeat("\x00", 52)},
+ },
+
+ // Ordered maps.
+ {
+ "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ },
+
+ // Issue #39.
+ {
+ "a:\n b:\n c: d\n",
+ map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
+ },
+
+ // Custom map type.
+ {
+ "a: {b: c}",
+ M{"a": M{"b": "c"}},
+ },
+
+ // Support encoding.TextUnmarshaler.
+ {
+ "a: 1.2.3.4\n",
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ },
+}
+
+type M map[interface{}]interface{}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
+
+func (s *S) TestUnmarshal(c *C) {
+ for _, item := range unmarshalTests {
+ t := reflect.ValueOf(item.value).Type()
+ var value interface{}
+ switch t.Kind() {
+ case reflect.Map:
+ value = reflect.MakeMap(t).Interface()
+ case reflect.String:
+ value = reflect.New(t).Interface()
+ case reflect.Ptr:
+ value = reflect.New(t.Elem()).Interface()
+ default:
+ c.Fatalf("missing case for %s", t)
+ }
+ err := yaml.Unmarshal([]byte(item.data), value)
+ if _, ok := err.(*yaml.TypeError); !ok {
+ c.Assert(err, IsNil)
+ }
+ if t.Kind() == reflect.String {
+ c.Assert(*value.(*string), Equals, item.value)
+ } else {
+ c.Assert(value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
+ {"v: [A,", "yaml: line 1: did not find expected node content"},
+ {"v:\n- [A,", "yaml: line 2: did not find expected node content"},
+ {"a: *b\n", "yaml: unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
+ {"value: -", "yaml: block sequence entries are not allowed in this context"},
+ {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
+ {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
+ {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var unmarshalerTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+}
+
+var unmarshalerResult = map[int]error{}
+
+type unmarshalerType struct {
+ value interface{}
+}
+
+func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
+ if err := unmarshal(&o.value); err != nil {
+ return err
+ }
+ if i, ok := o.value.(int); ok {
+ if result, ok := unmarshalerResult[i]; ok {
+ return result
+ }
+ }
+ return nil
+}
+
+type unmarshalerPointer struct {
+ Field *unmarshalerType "_"
+}
+
+type unmarshalerValue struct {
+ Field unmarshalerType "_"
+}
+
+func (s *S) TestUnmarshalerPointerField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerPointer{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ if item.value == nil {
+ c.Assert(obj.Field, IsNil)
+ } else {
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalerValueField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerValue{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalerWholeDocument(c *C) {
+ obj := &unmarshalerType{}
+ err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
+ c.Assert(err, IsNil)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
+ c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
+}
+
+func (s *S) TestUnmarshalerTypeError(c *C) {
+ unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
+ unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
+ defer func() {
+ delete(unmarshalerResult, 2)
+ delete(unmarshalerResult, 4)
+ }()
+
+ type T struct {
+ Before int
+ After int
+ M map[string]*unmarshalerType
+ }
+ var v T
+ data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " foo\n"+
+ " bar\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+ c.Assert(v.M["abc"], NotNil)
+ c.Assert(v.M["def"], IsNil)
+ c.Assert(v.M["ghi"], NotNil)
+ c.Assert(v.M["jkl"], IsNil)
+
+ c.Assert(v.M["abc"].value, Equals, 1)
+ c.Assert(v.M["ghi"].value, Equals, 3)
+}
+
+type proxyTypeError struct{}
+
+func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ var a int32
+ var b int64
+ if err := unmarshal(&s); err != nil {
+ panic(err)
+ }
+ if s == "a" {
+ if err := unmarshal(&b); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&a)
+ }
+ if err := unmarshal(&a); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&b)
+}
+
+func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
+ type T struct {
+ Before int
+ After int
+ M map[string]*proxyTypeError
+ }
+ var v T
+ data := `{before: A, m: {abc: a, def: b}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " line 1: cannot unmarshal !!str `a` into int32\n"+
+ " line 1: cannot unmarshal !!str `b` into int64\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+}
+
+type failingUnmarshaler struct{}
+
+var failingErr = errors.New("failingErr")
+
+func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ return failingErr
+}
+
+func (s *S) TestUnmarshalerError(c *C) {
+ err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+type sliceUnmarshaler []int
+
+func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var slice []int
+ err := unmarshal(&slice)
+ if err == nil {
+ *su = slice
+ return nil
+ }
+
+ var intVal int
+ err = unmarshal(&intVal)
+ if err == nil {
+ *su = []int{intVal}
+ return nil
+ }
+
+ return err
+}
+
+func (s *S) TestUnmarshalerRetry(c *C) {
+ var su sliceUnmarshaler
+ err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
+
+ err = yaml.Unmarshal([]byte("1"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ list:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ ! "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[interface{}]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+var unmarshalNullTests = []func() interface{}{
+ func() interface{} { var v interface{}; v = "v"; return &v },
+ func() interface{} { var s = "s"; return &s },
+ func() interface{} { var s = "s"; sptr := &s; return &sptr },
+ func() interface{} { var i = 1; return &i },
+ func() interface{} { var i = 1; iptr := &i; return &iptr },
+ func() interface{} { m := map[string]int{"s": 1}; return &m },
+ func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+ for _, test := range unmarshalNullTests {
+ item := test()
+ zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+ err := yaml.Unmarshal([]byte("null"), item)
+ c.Assert(err, IsNil)
+ if reflect.TypeOf(item).Kind() == reflect.Map {
+ c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+ } else {
+ c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+ }
+ }
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/emitterc.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/emitterc.go
new file mode 100644
index 00000000..9b3dc4a4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
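+
+// Illustrative note (not part of the upstream source): this lookahead lets
+// the emitter peek past a START event before committing to a style. On
+// MAPPING-START, for example, up to three more events are buffered so that
+// checks like yaml_emitter_check_empty_document (used below) and the
+// analogous empty-collection checks assumed to live elsewhere in this file
+// can see whether the matching END event follows immediately.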
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
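+
+// Illustrative note (not part of the upstream source): the indent starts
+// at -1 (set in yaml_emitter_emit_stream_start below), so with the default
+// best_indent of 2 a block mapping nested inside a block sequence walks
+// the indent through -1 -> 0 -> 2 -> 4, pushing each previous value onto
+// emitter.indents so the matching END event can pop it back. A top-level
+// flow collection jumps straight to best_indent instead.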
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+ return false
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
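+ // Upstream libyaml hardcodes this check to false as well, so the port
+ // simply preserves that behavior.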
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
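+ // The limit of 128 mirrors libyaml's emitter; the YAML spec itself
+ // allows simple keys of up to 1024 characters, so this is conservative.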
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
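+//
+// The style requested on the event is only a hint: canonical output and
+// multiline simple keys force double quotes, and any style that the
+// preceding analysis found unrepresentable is demoted to the next
+// safest quoting form.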
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) ||
+ (emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
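+// A well-formed directive such as "%TAG !e! tag:example.com,2000:app/"
+// carries the handle "!e!" and the prefix "tag:example.com,2000:app/".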
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
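+ // A value opening with "---" or "..." could be mistaken for a document
+ // start or end marker, so plain style must be ruled out in both contexts.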
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || (!is_ascii(value, i) && !emitter.unicode) {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? It can't be the end of the string, as that is the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
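+// The bytes 0xEF 0xBB 0xBF are the UTF-8 encoding of U+FEFF.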
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
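+// Bytes that are not URI-safe are percent-encoded, so a tag suffix byte
+// such as 0x20 (space) is written out as "%20".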
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
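+ // Any other code point is escaped numerically: \xXX up to 0xFF,
+ // \uXXXX up to 0xFFFF, and \UXXXXXXXX above that.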
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
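+// Write the indentation and chomping hints for a block scalar header.
+// For example, "foo" (no trailing break) takes the strip hint '-',
+// "foo\n\n" (an extra trailing break) takes the keep hint '+', and
+// "foo\n" needs no chomping hint at all.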
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode.go
new file mode 100644
index 00000000..972bc038
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode.go
@@ -0,0 +1,296 @@
+package yaml
+
+import (
+ "encoding"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
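+// newEncoder primes the event pipeline: every Marshal run emits
+// STREAM-START and DOCUMENT-START here, the content events while
+// marshalling, and DOCUMENT-END plus STREAM-END from finish.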
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ }
+ if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
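+//
+// For example, a YAML 1.1 parser reads the plain scalar "1:20" as the
+// sexagesimal integer 80 (1*60 + 20), hence the quoting.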
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode_test.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode_test.go
new file mode 100644
index 00000000..cdbf64a9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/encode_test.go
@@ -0,0 +1,434 @@
+package yaml_test
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "net"
+)
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ nil,
+ "null\n",
+ }, {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- |-\n B\n C\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X int } "a,omitempty"
+ B int "b,omitempty"
+ }{nil, 0},
+ "{}\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+
+ // Issue #24: bug in map merging logic.
+ {
+ map[string]string{"a": ""},
+ "a: \n",
+ },
+
+ // Issue #34: marshal unsupported base 60 floats quoted for compatibility
+ // with old YAML 1.1 parsers.
+ {
+ map[string]string{"a": "1:1"},
+ "a: \"1:1\"\n",
+ },
+
+ // Binary data.
+ {
+ map[string]string{"a": "\x00"},
+ "a: \"\\0\"\n",
+ }, {
+ map[string]string{"a": "\x80\x81\x82"},
+ "a: !!binary gIGC\n",
+ }, {
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ },
+
+ // Ordered maps.
+ {
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
+ },
+
+ // Encode unicode as utf-8 rather than in escaped form.
+ {
+ map[string]string{"a": "你好"},
+ "a: 你好\n",
+ },
+
+ // Support encoding.TextMarshaler.
+ {
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ "a: 1.2.3.4\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ for _, item := range marshalTests {
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+ panic string
+}{{
+ value: &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
+}}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ if item.panic != "" {
+ c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
+ } else {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+ }
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+var marshalerTests = []struct {
+ data string
+ value interface{}
+}{
+ {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", []interface{}{1, "A"}},
+ {"_: 10\n", 10},
+ {"_: null\n", nil},
+ {"_: BAR!\n", "BAR!"},
+}
+
+type marshalerType struct {
+ value interface{}
+}
+
+func (o marshalerType) MarshalYAML() (interface{}, error) {
+ return o.value, nil
+}
+
+type marshalerValue struct {
+ Field marshalerType "_"
+}
+
+func (s *S) TestMarshaler(c *C) {
+ for _, item := range marshalerTests {
+ obj := &marshalerValue{}
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestMarshalerWholeDocument(c *C) {
+ obj := &marshalerType{}
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+type failingMarshaler struct{}
+
+func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
+ return nil, failingErr
+}
+
+func (s *S) TestMarshalerError(c *C) {
+ _, err := yaml.Marshal(&failingMarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/2",
+ "a/10",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/parserc.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/parserc.go
new file mode 100644
index 00000000..0a7037ad
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/parserc.go
@@ -0,0 +1,1104 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
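+//
+// Informally, the input "a: [b, c]" arrives as the token stream
+// STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR("a"), VALUE,
+// FLOW-SEQUENCE-START, SCALAR("b"), FLOW-ENTRY, SCALAR("c"),
+// FLOW-SEQUENCE-END, BLOCK-END, STREAM-END, which this grammar reduces
+// to the corresponding document, mapping, and sequence events.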
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
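+ // A zeroed event (typ == yaml_NO_EVENT) with a true result is how
+ // callers detect that the stream is exhausted.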
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+ return false
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
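+ // implicit remains true when the document ends without an explicit '...' indicator.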
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
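+ // Node properties may appear in either order (ANCHOR then TAG, or TAG then
+ // ANCHOR); both orderings are handled symmetrically below, with end_mark
+ // extended to cover whichever property comes last.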
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
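+ // An empty handle means the scanner saw a verbatim tag (written !<...>),
+ // which needs no resolution; otherwise the handle must match one of the
+ // %TAG directives in scope for this document.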
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
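+// A KEY token inside a flow sequence introduces a single-pair mapping,
+// e.g. the entry 'a: b' in '[a: b, c]'.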
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
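+// Tag directives implied for every document: the handle '!' expands to
+// itself, and '!!' expands to the standard 'tag:yaml.org,2002:' namespace.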
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/readerc.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/readerc.go
new file mode 100644
index 00000000..d5fb0972
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
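+// Only UTF-8 and UTF-16 byte order marks are checked; UTF-32 input is not
+// supported by this reader.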
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ }
+ buffer_len += width
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/resolve.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/resolve.go
new file mode 100644
index 00000000..89cd7ec2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/resolve.go
@@ -0,0 +1,187 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
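+ // Bytes not set above stay 0, which resolve() below takes as a hint that
+ // the scalar cannot be anything but a plain string.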
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
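+// resolve infers a tag for a plain scalar. Under these rules, for example,
+// "yes", "10", and "3.14" resolve to !!bool, !!int, and !!float, while
+// anything unrecognized (including timestamps, which are not handled yet)
+// falls back to !!str.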
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, int(intv)
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, -int(intv)
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
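+// When the output spans more than one line, a '\n' is appended after every
+// chunk, including the final one.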
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/scannerc.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/scannerc.go
new file mode 100644
index 00000000..fe93b190
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in details.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
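+ // Fast path: a single-byte character that fits within the buffer's capacity.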
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
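+
+// An illustrative sketch (not part of this file) of how the parser side
+// can drain the scanner token by token:
+//
+//	var token yaml_token_t
+//	for yaml_parser_scan(parser, &token) {
+//		if token.typ == yaml_STREAM_END_TOKEN {
+//			break
+//		}
+//		// ... handle token ...
+//	}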
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
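+
+// trace is a debugging aid only. A typical use is a deferred call such as
+//
+//	defer trace("fetch_next_token")()
+//
+// which prints "+++ fetch_next_token" on entry and "--- fetch_next_token"
+// on exit.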
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
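+
+// For example, after scanning the plain scalar "foo" the scanner keeps it
+// as a potential simple key; once a line break is consumed (or more than
+// 1024 characters go by), a later ':' can no longer turn it into a KEY
+// token, so the possibility is dropped here, or an error is raised if the
+// key was required.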
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
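+
+// Together, roll/unroll treat indentation as a stack machine over columns.
+// Scanning, for instance,
+//
+//	a:
+//	  - b
+//
+// moves parser.indent from -1 to 0 (BLOCK-MAPPING-START at column 0) and
+// then to 2 (BLOCK-SEQUENCE-START at column 2), pushing the old levels on
+// parser.indents; unrolling at stream end pops both levels and emits two
+// BLOCK-END tokens.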
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
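+
+// Note the retroactive insertion above: for input such as "a: b", the
+// scalar "a" is queued first on its own; only when ':' is seen does
+// fetch_value insert the KEY token (and, via roll_indent, a
+// BLOCK-MAPPING-START if needed) before the already-queued scalar, at the
+// position recorded in the simple key's token_number.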
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found uknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
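+
+// For example, "%YAML 1.1" yields major == 1 and minor == 1; each
+// component may be at most max_number_length (two) digits long.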
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if the length of the anchor is greater than 0 and it is followed
+ // by a whitespace character or one of the indicators:
+ //
+ //	'?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
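+
+// To illustrate the tag forms handled above:
+//
+//	!<tag:yaml.org,2002:str>    handle ""    suffix "tag:yaml.org,2002:str"
+//	!!str                       handle "!!"  suffix "str"
+//	!local                      handle "!"   suffix "local"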
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ // [Go] The original port compared s[1] == 0, which panics on a lone "!";
+ // compare the whole handle instead.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
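+
+// For example, the escaped sequence "%C3%A9" decodes to the octets
+// 0xC3 0xA9: the leading octet fixes the UTF-8 width at 2, the second must
+// be a valid continuation byte (10xxxxxx), and the result is U+00E9 ('é').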
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an intendation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
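+
+// The block scalar header parsed above admits a chomping indicator ('+'
+// keeps trailing breaks, '-' strips them) and a single nonzero digit as an
+// explicit indentation increment, in either order: "|", ">-", "|+2" and
+// "|2+" are all valid introducers.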
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // Is is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
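The escape-decoding branch in the quoted-scalar scanner above writes the decoded code point as UTF-8 by hand (the <=0x7F / <=0x7FF / <=0xFFFF / else ladder). A standalone sketch, not part of the vendored file, checking that the same arithmetic agrees with unicode/utf8:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // encode mirrors the scanner's manual UTF-8 ladder for a decoded escape value.
    func encode(value rune) (s []byte) {
        switch {
        case value <= 0x7F:
            s = append(s, byte(value))
        case value <= 0x7FF:
            s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
        case value <= 0xFFFF:
            s = append(s, byte(0xE0+(value>>12)),
                byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
        default:
            s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)),
                byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
        }
        return s
    }

    func main() {
        var buf [4]byte
        for _, r := range []rune{'A', '\u00E9', '\u2603', '\U0001F600'} {
            n := utf8.EncodeRune(buf[:], r)
            fmt.Printf("U+%04X manual % X stdlib % X\n", r, encode(r), buf[:n])
        }
    }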
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/sorter.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/sorter.go
new file mode 100644
index 00000000..5958822f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether v is in fact a number or bool.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
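keyList.Less gives map keys a natural ordering: numbers and bools (lower reflect kinds) sort before strings, and digit runs inside strings compare numerically, so "item2" comes before "item10". The encoder sorts map keys with this comparator, which a standalone sketch can show through the public API (shown with the upstream import path gopkg.in/yaml.v2; this tree vendors the fork as gopkg.in/BrianBland/yaml.v2):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        out, err := yaml.Marshal(map[string]int{
            "item10": 10,
            "item2":  2,
            "item1":  1,
        })
        if err != nil {
            panic(err)
        }
        // Digit runs compare numerically, so the keys emit as
        // item1, item2, item10 rather than the lexicographic
        // item1, item10, item2.
        fmt.Print(string(out))
    }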
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/suite_test.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/suite_test.go
new file mode 100644
index 00000000..c5cf1ed4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/writerc.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/writerc.go
new file mode 100644
index 00000000..190362f2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) // mask to 2 bits: the low surrogate is 0xDC00 + (value & 0x3FF)
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
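The surrogate-pair branch is easiest to sanity-check with a concrete code point. A standalone sketch of the arithmetic for U+1F600, cross-checked against unicode/utf16 (the masks follow the canonical UTF-16 formulas, low surrogate = 0xDC00 + (value & 0x3FF)):

    package main

    import (
        "fmt"
        "unicode/utf16"
    )

    func main() {
        r := rune(0x1F600) // a code point outside the BMP
        v := r - 0x10000   // 0xF600, a 20-bit value

        hi := 0xD800 + (v >> 10)   // high surrogate: 0xD83D
        lo := 0xDC00 + (v & 0x3FF) // low surrogate:  0xDE00

        h, l := utf16.EncodeRune(r) // the standard library agrees
        fmt.Printf("manual %04X %04X stdlib %04X %04X\n", hi, lo, h, l)
    }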
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yaml.go
new file mode 100644
index 00000000..70fb66b4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yaml.go
@@ -0,0 +1,334 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the struct it's applied to, so its fields
+// are processed as if they were part of the outer
+// struct.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: " + format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ // TODO: Implement support for inline maps.
+ //case reflect.Map:
+ // if inlineMap >= 0 {
+ // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ // }
+ // if field.Type.Key() != reflect.TypeOf("") {
+ // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ // }
+ // inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ }
+ return false
+}
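The omitempty, flow, and inline flags documented in Marshal above are easiest to see in a round trip. A standalone sketch (type names are illustrative; exact quoting of string scalars is up to the encoder):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type Address struct {
        City string
        Zip  string `yaml:"postal_code"`
    }

    type Person struct {
        Name    string
        Age     int      `yaml:",omitempty"` // zero value: omitted
        Address Address  `yaml:",inline"`    // fields hoisted into Person
        Tags    []string `yaml:",flow"`      // emitted as [a, b]
    }

    func main() {
        p := Person{Name: "alice", Address: Address{City: "Lyon", Zip: "69000"}, Tags: []string{"a", "b"}}
        out, err := yaml.Marshal(&p)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // name: alice
        // city: Lyon
        // postal_code: "69000"
        // tags: [a, b]

        var q Person
        if err := yaml.Unmarshal(out, &q); err != nil {
            panic(err)
        }
        fmt.Println(q.Address.City) // Lyon
    }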
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlh.go
new file mode 100644
index 00000000..4b020b1b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// [in] buffer The buffer with bytes to be written.
+// [in] size The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
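yaml_write_handler_t above is the whole output contract: the emitter hands the handler a flushed chunk and the handler either consumes all of it or returns an error. A hypothetical in-package sketch — the helper name is illustrative and not part of this file — collecting emitted bytes into a caller-owned slice:

    // collecting_write_handler is a hypothetical sketch: it returns a
    // handler that appends every flushed chunk to *out.
    func collecting_write_handler(out *[]byte) yaml_write_handler_t {
        return func(emitter *yaml_emitter_t, buffer []byte) error {
            *out = append(*out, buffer...)
            return nil
        }
    }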
diff --git a/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/BrianBland/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
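width at the end of this file decodes only the UTF-8 lead byte. A standalone sketch, not part of the package, cross-checking the same mask table against unicode/utf8:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // width mirrors the lead-byte mask table above; it returns 0 for a
    // continuation or invalid lead byte.
    func width(b byte) int {
        switch {
        case b&0x80 == 0x00:
            return 1
        case b&0xE0 == 0xC0:
            return 2
        case b&0xF0 == 0xE0:
            return 3
        case b&0xF8 == 0xF0:
            return 4
        }
        return 0
    }

    func main() {
        for _, s := range []string{"A", "\u00E9", "\u2603", "\U0001F600"} {
            r, n := utf8.DecodeRuneInString(s)
            fmt.Printf("U+%04X lead 0x%02X width %d stdlib %d\n", r, s[0], width(s[0]), n)
        }
    }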
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore
new file mode 100644
index 00000000..191a5360
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/.gitignore
@@ -0,0 +1,4 @@
+_*
+*.swp
+*.[568]
+[568].out
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE
new file mode 100644
index 00000000..545cf2d3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE
@@ -0,0 +1,25 @@
+Gocheck - A rich testing framework for Go
+
+Copyright (c) 2010-2013 Gustavo Niemeyer
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/README.md b/Godeps/_workspace/src/gopkg.in/check.v1/README.md
new file mode 100644
index 00000000..0ca9e572
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/README.md
@@ -0,0 +1,20 @@
+Instructions
+============
+
+Install the package with:
+
+ go get gopkg.in/check.v1
+
+Import it with:
+
+ import "gopkg.in/check.v1"
+
+and use _check_ as the package name inside the code.
+
+For more details, visit the project page:
+
+* http://labix.org/gocheck
+
+and the API documentation:
+
+* https://gopkg.in/check.v1
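+
+A minimal test file wiring the runner into go test looks like this
+(suite and test names are illustrative):
+
+    package mypkg_test
+
+    import (
+        "testing"
+
+        . "gopkg.in/check.v1"
+    )
+
+    // Hook gocheck into the standard "go test" runner.
+    func Test(t *testing.T) { TestingT(t) }
+
+    type MySuite struct{}
+
+    var _ = Suite(&MySuite{})
+
+    func (s *MySuite) TestAnswer(c *C) {
+        c.Assert(21*2, Equals, 42)
+    }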
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/TODO b/Godeps/_workspace/src/gopkg.in/check.v1/TODO
new file mode 100644
index 00000000..33498270
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/TODO
@@ -0,0 +1,2 @@
+- Assert(slice, Contains, item)
+- Parallel test support
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go
new file mode 100644
index 00000000..48cb8c81
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package check
+
+import (
+ "fmt"
+ "runtime"
+ "time"
+)
+
+var memStats runtime.MemStats
+
+// timer holds benchmark timing state (adapted from the standard testing
+// package): it manages benchmark timing and records the number of
+// iterations to run.
+type timer struct {
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ N int
+ bytes int64
+ timerOn bool
+ benchTime time.Duration
+ // The initial states of memStats.Mallocs and memStats.TotalAlloc.
+ startAllocs uint64
+ startBytes uint64
+ // The net total of this test after being run.
+ netAllocs uint64
+ netBytes uint64
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (c *C) StartTimer() {
+ if !c.timerOn {
+ c.start = time.Now()
+ c.timerOn = true
+
+ runtime.ReadMemStats(&memStats)
+ c.startAllocs = memStats.Mallocs
+ c.startBytes = memStats.TotalAlloc
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (c *C) StopTimer() {
+ if c.timerOn {
+ c.duration += time.Now().Sub(c.start)
+ c.timerOn = false
+ runtime.ReadMemStats(&memStats)
+ c.netAllocs += memStats.Mallocs - c.startAllocs
+ c.netBytes += memStats.TotalAlloc - c.startBytes
+ }
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (c *C) ResetTimer() {
+ if c.timerOn {
+ c.start = time.Now()
+ runtime.ReadMemStats(&memStats)
+ c.startAllocs = memStats.Mallocs
+ c.startBytes = memStats.TotalAlloc
+ }
+ c.duration = 0
+ c.netAllocs = 0
+ c.netBytes = 0
+}
+
+// SetBytes records the number of bytes that the benchmark processes
+// on each iteration. If this is called in a benchmark it will also
+// report MB/s.
+func (c *C) SetBytes(n int64) {
+ c.bytes = n
+}
+
+func (c *C) nsPerOp() int64 {
+ if c.N <= 0 {
+ return 0
+ }
+ return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+ if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+ return 0
+ }
+ return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+ if c.N <= 0 {
+ return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
+ }
+ mbs := c.mbPerSec()
+ mb := ""
+ if mbs != 0 {
+ mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+ }
+ nsop := c.nsPerOp()
+ ns := fmt.Sprintf("%10d ns/op", nsop)
+ if c.N > 0 && nsop < 100 {
+ // The format specifiers here make sure that
+ // the ones digits line up for all three possible formats.
+ if nsop < 10 {
+ ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+ } else {
+ ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+ }
+ }
+ memStats := ""
+ if c.benchMem {
+ allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
+ allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
+ memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
+ }
+ return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
+}
+
+func min(x, y int) int {
+ if x > y {
+ return y
+ }
+ return x
+}
+
+func max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+ var tens = 0
+ // tens = floor(log_10(n))
+ for n > 10 {
+ n = n / 10
+ tens++
+ }
+ // result = 10^tens
+ result := 1
+ for i := 0; i < tens; i++ {
+ result *= 10
+ }
+ return result
+}
+
+// roundUp rounds n up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+ base := roundDown10(n)
+ if n < (2 * base) {
+ return 2 * base
+ }
+ if n < (5 * base) {
+ return 5 * base
+ }
+ return 10 * base
+}
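+
+// Worked examples of the rounding helpers above (for illustration):
+//
+//	roundDown10(35) == 10     roundDown10(150) == 100
+//	roundUp(35) == 50         roundUp(150) == 200     roundUp(800) == 1000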
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go
new file mode 100644
index 00000000..4dd827c1
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go
@@ -0,0 +1,91 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+ "time"
+ . "gopkg.in/check.v1"
+)
+
+var benchmarkS = Suite(&BenchmarkS{})
+
+type BenchmarkS struct{}
+
+func (s *BenchmarkS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+func (s *BenchmarkS) TestBasicTestTiming(c *C) {
+ helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" +
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestStreamTestTiming(c *C) {
+ helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(&helper, &runConf)
+
+ expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmark(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark1",
+ }
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Benchmark1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Benchmark1")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ // ... and more.
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkBytes(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark2",
+ }
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkMem(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkMem: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark3",
+ }
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n"
+ c.Assert(output.value, Matches, expected)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go
new file mode 100644
index 00000000..e55f327c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go
@@ -0,0 +1,82 @@
+// These initial tests are for bootstrapping. They verify that we can
+// basically use the testing infrastructure itself to check if the test
+// system is working.
+//
+// These tests will deliberately break down the test runner badly in case
+// of errors, because if they simply failed we couldn't be sure the
+// developer would ever see anything (failing would mean the failure
+// reporting system itself isn't working! :-)
+//
+// Do not assume *any* internal functionality works as expected besides
+// what's actually tested here.
+
+package check_test
+
+import (
+ "fmt"
+ "gopkg.in/check.v1"
+ "strings"
+)
+
+type BootstrapS struct{}
+
+var bootstrapS = check.Suite(&BootstrapS{})
+
+func (s *BootstrapS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *BootstrapS) TestFailedAndFail(c *check.C) {
+ if c.Failed() {
+ critical("c.Failed() must be false first!")
+ }
+ c.Fail()
+ if !c.Failed() {
+ critical("c.Fail() didn't put the test in a failed state!")
+ }
+ c.Succeed()
+}
+
+func (s *BootstrapS) TestFailedAndSucceed(c *check.C) {
+ c.Fail()
+ c.Succeed()
+ if c.Failed() {
+ critical("c.Succeed() didn't put the test back in a non-failed state")
+ }
+}
+
+func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) {
+ c.Log("Hello there!")
+ log := c.GetTestLog()
+ if log != "Hello there!\n" {
+ critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log))
+ }
+}
+
+func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) {
+ c.Logf("Hello %v", "there!")
+ log := c.GetTestLog()
+ if log != "Hello there!\n" {
+ critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log))
+ }
+}
+
+func (s *BootstrapS) TestRunShowsErrors(c *check.C) {
+ output := String{}
+ check.Run(&FailHelper{}, &check.RunConf{Output: &output})
+ if strings.Index(output.value, "Expected failure!") == -1 {
+ critical(fmt.Sprintf("RunWithWriter() output did not contain the "+
+ "expected failure! Got: %#v",
+ output.value))
+ }
+}
+
+func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) {
+ output := String{}
+ check.Run(&SuccessHelper{}, &check.RunConf{Output: &output})
+ if strings.Index(output.value, "Expected success!") != -1 {
+ critical(fmt.Sprintf("RunWithWriter() output contained a successful "+
+ "test! Got: %#v",
+ output.value))
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/check.go b/Godeps/_workspace/src/gopkg.in/check.v1/check.go
new file mode 100644
index 00000000..ca8c0f92
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/check.go
@@ -0,0 +1,945 @@
+// Package check is a rich testing extension for Go's testing package.
+//
+// For details about the project, see:
+//
+// http://labix.org/gocheck
+//
+package check
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// -----------------------------------------------------------------------
+// Internal type which deals with suite method calling.
+
+const (
+ fixtureKd = iota
+ testKd
+)
+
+type funcKind int
+
+const (
+ succeededSt = iota
+ failedSt
+ skippedSt
+ panickedSt
+ fixturePanickedSt
+ missedSt
+)
+
+type funcStatus int
+
+// A method value can't reach its own Method structure.
+type methodType struct {
+ reflect.Value
+ Info reflect.Method
+}
+
+func newMethod(receiver reflect.Value, i int) *methodType {
+ return &methodType{receiver.Method(i), receiver.Type().Method(i)}
+}
+
+func (method *methodType) PC() uintptr {
+ return method.Info.Func.Pointer()
+}
+
+func (method *methodType) suiteName() string {
+ t := method.Info.Type.In(0)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t.Name()
+}
+
+func (method *methodType) String() string {
+ return method.suiteName() + "." + method.Info.Name
+}
+
+func (method *methodType) matches(re *regexp.Regexp) bool {
+ return (re.MatchString(method.Info.Name) ||
+ re.MatchString(method.suiteName()) ||
+ re.MatchString(method.String()))
+}
+
+type C struct {
+ method *methodType
+ kind funcKind
+ testName string
+ status funcStatus
+ logb *logger
+ logw io.Writer
+ done chan *C
+ reason string
+ mustFail bool
+ tempDir *tempDir
+ benchMem bool
+ startTime time.Time
+ timer
+}
+
+func (c *C) stopNow() {
+ runtime.Goexit()
+}
+
+// logger is a concurrency-safe bytes.Buffer.
+type logger struct {
+ sync.Mutex
+ writer bytes.Buffer
+}
+
+func (l *logger) Write(buf []byte) (int, error) {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.Write(buf)
+}
+
+func (l *logger) WriteTo(w io.Writer) (int64, error) {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.WriteTo(w)
+}
+
+func (l *logger) String() string {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.String()
+}
+
+// -----------------------------------------------------------------------
+// Handling of temporary files and directories.
+
+type tempDir struct {
+ sync.Mutex
+ path string
+ counter int
+}
+
+func (td *tempDir) newPath() string {
+ td.Lock()
+ defer td.Unlock()
+ if td.path == "" {
+ var err error
+ for i := 0; i != 100; i++ {
+ path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
+ if err = os.Mkdir(path, 0700); err == nil {
+ td.path = path
+ break
+ }
+ }
+ if td.path == "" {
+ panic("Couldn't create temporary directory: " + err.Error())
+ }
+ }
+ result := filepath.Join(td.path, strconv.Itoa(td.counter))
+ td.counter += 1
+ return result
+}
+
+func (td *tempDir) removeAll() {
+ td.Lock()
+ defer td.Unlock()
+ if td.path != "" {
+ err := os.RemoveAll(td.path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: %v\n", err)
+ }
+ }
+}
+
+// MkDir creates a new temporary directory which is automatically removed
+// after the suite finishes running.
+func (c *C) MkDir() string {
+ path := c.tempDir.newPath()
+ if err := os.Mkdir(path, 0700); err != nil {
+ panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
+ }
+ return path
+}
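+
+// A usage sketch (hypothetical test, for illustration). Each call yields a
+// fresh directory, and cleanup happens when the suite finishes:
+//
+//	func (s *MySuite) TestWriteFile(c *C) {
+//		dir := c.MkDir()
+//		err := ioutil.WriteFile(filepath.Join(dir, "f.txt"), []byte("hi"), 0644)
+//		c.Assert(err, IsNil)
+//	}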
+
+// -----------------------------------------------------------------------
+// Low-level logging functions.
+
+func (c *C) log(args ...interface{}) {
+ c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
+}
+
+func (c *C) logf(format string, args ...interface{}) {
+ c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
+}
+
+func (c *C) logNewLine() {
+ c.writeLog([]byte{'\n'})
+}
+
+func (c *C) writeLog(buf []byte) {
+ c.logb.Write(buf)
+ if c.logw != nil {
+ c.logw.Write(buf)
+ }
+}
+
+func hasStringOrError(x interface{}) (ok bool) {
+ _, ok = x.(fmt.Stringer)
+ if ok {
+ return
+ }
+ _, ok = x.(error)
+ return
+}
+
+func (c *C) logValue(label string, value interface{}) {
+ if label == "" {
+ if hasStringOrError(value) {
+ c.logf("... %#v (%q)", value, value)
+ } else {
+ c.logf("... %#v", value)
+ }
+ } else if value == nil {
+ c.logf("... %s = nil", label)
+ } else {
+ if hasStringOrError(value) {
+ fv := fmt.Sprintf("%#v", value)
+ qv := fmt.Sprintf("%q", value)
+ if fv != qv {
+ c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
+ return
+ }
+ }
+ if s, ok := value.(string); ok && isMultiLine(s) {
+ c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
+ c.logMultiLine(s)
+ } else {
+ c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
+ }
+ }
+}
+
+func (c *C) logMultiLine(s string) {
+ b := make([]byte, 0, len(s)*2)
+ i := 0
+ n := len(s)
+ for i < n {
+ j := i + 1
+ for j < n && s[j-1] != '\n' {
+ j++
+ }
+ b = append(b, "... "...)
+ b = strconv.AppendQuote(b, s[i:j])
+ if j < n {
+ b = append(b, " +"...)
+ }
+ b = append(b, '\n')
+ i = j
+ }
+ c.writeLog(b)
+}
+
+func isMultiLine(s string) bool {
+ for i := 0; i+1 < len(s); i++ {
+ if s[i] == '\n' {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *C) logString(issue string) {
+ c.log("... ", issue)
+}
+
+func (c *C) logCaller(skip int) {
+ // This is a bit heavier than it ought to be.
+ skip += 1 // Our own frame.
+ pc, callerFile, callerLine, ok := runtime.Caller(skip)
+ if !ok {
+ return
+ }
+ var testFile string
+ var testLine int
+ testFunc := runtime.FuncForPC(c.method.PC())
+ if runtime.FuncForPC(pc) != testFunc {
+ for {
+ skip += 1
+ if pc, file, line, ok := runtime.Caller(skip); ok {
+ // Note that the test line may be different on
+ // distinct calls for the same test. Showing
+ // the "internal" line is helpful when debugging.
+ if runtime.FuncForPC(pc) == testFunc {
+ testFile, testLine = file, line
+ break
+ }
+ } else {
+ break
+ }
+ }
+ }
+ if testFile != "" && (testFile != callerFile || testLine != callerLine) {
+ c.logCode(testFile, testLine)
+ }
+ c.logCode(callerFile, callerLine)
+}
+
+func (c *C) logCode(path string, line int) {
+ c.logf("%s:%d:", nicePath(path), line)
+ code, err := printLine(path, line)
+ if code == "" {
+ code = "..." // XXX Open the file and take the raw line.
+ if err != nil {
+ code += err.Error()
+ }
+ }
+ c.log(indent(code, " "))
+}
+
+var valueGo = filepath.Join("reflect", "value.go")
+var asmGo = filepath.Join("runtime", "asm_")
+
+func (c *C) logPanic(skip int, value interface{}) {
+ skip++ // Our own frame.
+ initialSkip := skip
+ for ; ; skip++ {
+ if pc, file, line, ok := runtime.Caller(skip); ok {
+ if skip == initialSkip {
+ c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
+ }
+ name := niceFuncName(pc)
+ path := nicePath(file)
+ if strings.Contains(path, "/gopkg.in/check.v") {
+ continue
+ }
+ if name == "Value.call" && strings.HasSuffix(path, valueGo) {
+ continue
+ }
+ if name == "call16" && strings.Contains(path, asmGo) {
+ continue
+ }
+ c.logf("%s:%d\n in %s", nicePath(file), line, name)
+ } else {
+ break
+ }
+ }
+}
+
+func (c *C) logSoftPanic(issue string) {
+ c.log("... Panic: ", issue)
+}
+
+func (c *C) logArgPanic(method *methodType, expectedType string) {
+ c.logf("... Panic: %s argument should be %s",
+ niceFuncName(method.PC()), expectedType)
+}
+
+// -----------------------------------------------------------------------
+// Some simple formatting helpers.
+
+var initWD, initWDErr = os.Getwd()
+
+func init() {
+ if initWDErr == nil {
+ initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
+ }
+}
+
+func nicePath(path string) string {
+ if initWDErr == nil {
+ if strings.HasPrefix(path, initWD) {
+ return path[len(initWD):]
+ }
+ }
+ return path
+}
+
+func niceFuncPath(pc uintptr) string {
+ function := runtime.FuncForPC(pc)
+ if function != nil {
+ filename, line := function.FileLine(pc)
+ return fmt.Sprintf("%s:%d", nicePath(filename), line)
+ }
+ return ""
+}
+
+func niceFuncName(pc uintptr) string {
+ function := runtime.FuncForPC(pc)
+ if function != nil {
+ name := path.Base(function.Name())
+ if i := strings.Index(name, "."); i > 0 {
+ name = name[i+1:]
+ }
+ if strings.HasPrefix(name, "(*") {
+ if i := strings.Index(name, ")"); i > 0 {
+ name = name[2:i] + name[i+1:]
+ }
+ }
+ if i := strings.LastIndex(name, ".*"); i != -1 {
+ name = name[:i] + "." + name[i+2:]
+ }
+ if i := strings.LastIndex(name, "·"); i != -1 {
+ name = name[:i] + "." + name[i+2:]
+ }
+ return name
+ }
+ return ""
+}
+
+// -----------------------------------------------------------------------
+// Result tracker to aggregate call results.
+
+type Result struct {
+ Succeeded int
+ Failed int
+ Skipped int
+ Panicked int
+ FixturePanicked int
+ ExpectedFailures int
+ Missed int // Not even tried to run, related to a panic in the fixture.
+ RunError error // Houston, we've got a problem.
+ WorkDir string // If KeepWorkDir is true
+}
+
+type resultTracker struct {
+ result Result
+ _lastWasProblem bool
+ _waiting int
+ _missed int
+ _expectChan chan *C
+ _doneChan chan *C
+ _stopChan chan bool
+}
+
+func newResultTracker() *resultTracker {
+ return &resultTracker{_expectChan: make(chan *C), // Synchronous
+ _doneChan: make(chan *C, 32), // Asynchronous
+ _stopChan: make(chan bool)} // Synchronous
+}
+
+func (tracker *resultTracker) start() {
+ go tracker._loopRoutine()
+}
+
+func (tracker *resultTracker) waitAndStop() {
+ <-tracker._stopChan
+}
+
+func (tracker *resultTracker) expectCall(c *C) {
+ tracker._expectChan <- c
+}
+
+func (tracker *resultTracker) callDone(c *C) {
+ tracker._doneChan <- c
+}
+
+func (tracker *resultTracker) _loopRoutine() {
+ for {
+ var c *C
+ if tracker._waiting > 0 {
+ // Calls still running. Can't stop.
+ select {
+ // XXX Reindent this (not now to make diff clear)
+ case c = <-tracker._expectChan:
+ tracker._waiting += 1
+ case c = <-tracker._doneChan:
+ tracker._waiting -= 1
+ switch c.status {
+ case succeededSt:
+ if c.kind == testKd {
+ if c.mustFail {
+ tracker.result.ExpectedFailures++
+ } else {
+ tracker.result.Succeeded++
+ }
+ }
+ case failedSt:
+ tracker.result.Failed++
+ case panickedSt:
+ if c.kind == fixtureKd {
+ tracker.result.FixturePanicked++
+ } else {
+ tracker.result.Panicked++
+ }
+ case fixturePanickedSt:
+ // Track it as missed, since the panic
+ // was on the fixture, not on the test.
+ tracker.result.Missed++
+ case missedSt:
+ tracker.result.Missed++
+ case skippedSt:
+ if c.kind == testKd {
+ tracker.result.Skipped++
+ }
+ }
+ }
+ } else {
+ // No calls. Can stop, but no done calls here.
+ select {
+ case tracker._stopChan <- true:
+ return
+ case c = <-tracker._expectChan:
+ tracker._waiting += 1
+ case c = <-tracker._doneChan:
+ panic("Tracker got an unexpected done call.")
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------
+// The underlying suite runner.
+
+type suiteRunner struct {
+ suite interface{}
+ setUpSuite, tearDownSuite *methodType
+ setUpTest, tearDownTest *methodType
+ tests []*methodType
+ tracker *resultTracker
+ tempDir *tempDir
+ keepDir bool
+ output *outputWriter
+ reportedProblemLast bool
+ benchTime time.Duration
+ benchMem bool
+}
+
+type RunConf struct {
+ Output io.Writer
+ Stream bool
+ Verbose bool
+ Filter string
+ Benchmark bool
+ BenchmarkTime time.Duration // Defaults to 1 second
+ BenchmarkMem bool
+ KeepWorkDir bool
+}
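+
+// A typical invocation sketch (hypothetical suite, for illustration):
+//
+//	result := Run(&MySuite{}, &RunConf{
+//		Output:  os.Stdout,
+//		Verbose: true,
+//		Filter:  "TestParse",
+//	})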
+
+// Create a new suiteRunner able to run all methods in the given suite.
+func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
+ var conf RunConf
+ if runConf != nil {
+ conf = *runConf
+ }
+ if conf.Output == nil {
+ conf.Output = os.Stdout
+ }
+ if conf.Benchmark {
+ conf.Verbose = true
+ }
+
+ suiteType := reflect.TypeOf(suite)
+ suiteNumMethods := suiteType.NumMethod()
+ suiteValue := reflect.ValueOf(suite)
+
+ runner := &suiteRunner{
+ suite: suite,
+ output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
+ tracker: newResultTracker(),
+ benchTime: conf.BenchmarkTime,
+ benchMem: conf.BenchmarkMem,
+ tempDir: &tempDir{},
+ keepDir: conf.KeepWorkDir,
+ tests: make([]*methodType, 0, suiteNumMethods),
+ }
+ if runner.benchTime == 0 {
+ runner.benchTime = 1 * time.Second
+ }
+
+ var filterRegexp *regexp.Regexp
+ if conf.Filter != "" {
+ if regexp, err := regexp.Compile(conf.Filter); err != nil {
+ msg := "Bad filter expression: " + err.Error()
+ runner.tracker.result.RunError = errors.New(msg)
+ return runner
+ } else {
+ filterRegexp = regexp
+ }
+ }
+
+ for i := 0; i != suiteNumMethods; i++ {
+ method := newMethod(suiteValue, i)
+ switch method.Info.Name {
+ case "SetUpSuite":
+ runner.setUpSuite = method
+ case "TearDownSuite":
+ runner.tearDownSuite = method
+ case "SetUpTest":
+ runner.setUpTest = method
+ case "TearDownTest":
+ runner.tearDownTest = method
+ default:
+ prefix := "Test"
+ if conf.Benchmark {
+ prefix = "Benchmark"
+ }
+ if !strings.HasPrefix(method.Info.Name, prefix) {
+ continue
+ }
+ if filterRegexp == nil || method.matches(filterRegexp) {
+ runner.tests = append(runner.tests, method)
+ }
+ }
+ }
+ return runner
+}
+
+// Run all methods in the given suite.
+func (runner *suiteRunner) run() *Result {
+ if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
+ runner.tracker.start()
+ if runner.checkFixtureArgs() {
+ c := runner.runFixture(runner.setUpSuite, "", nil)
+ if c == nil || c.status == succeededSt {
+ for i := 0; i != len(runner.tests); i++ {
+ c := runner.runTest(runner.tests[i])
+ if c.status == fixturePanickedSt {
+ runner.skipTests(missedSt, runner.tests[i+1:])
+ break
+ }
+ }
+ } else if c != nil && c.status == skippedSt {
+ runner.skipTests(skippedSt, runner.tests)
+ } else {
+ runner.skipTests(missedSt, runner.tests)
+ }
+ runner.runFixture(runner.tearDownSuite, "", nil)
+ } else {
+ runner.skipTests(missedSt, runner.tests)
+ }
+ runner.tracker.waitAndStop()
+ if runner.keepDir {
+ runner.tracker.result.WorkDir = runner.tempDir.path
+ } else {
+ runner.tempDir.removeAll()
+ }
+ }
+ return &runner.tracker.result
+}
+
+// Create a call object with the given suite method, and fork a
+// goroutine with the provided dispatcher for running it.
+func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+ var logw io.Writer
+ if runner.output.Stream {
+ logw = runner.output
+ }
+ if logb == nil {
+ logb = new(logger)
+ }
+ c := &C{
+ method: method,
+ kind: kind,
+ testName: testName,
+ logb: logb,
+ logw: logw,
+ tempDir: runner.tempDir,
+ done: make(chan *C, 1),
+ timer: timer{benchTime: runner.benchTime},
+ startTime: time.Now(),
+ benchMem: runner.benchMem,
+ }
+ runner.tracker.expectCall(c)
+ go (func() {
+ runner.reportCallStarted(c)
+ defer runner.callDone(c)
+ dispatcher(c)
+ })()
+ return c
+}
+
+// Same as forkCall(), but wait for call to finish before returning.
+func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+ c := runner.forkCall(method, kind, testName, logb, dispatcher)
+ <-c.done
+ return c
+}
+
+// Handle a finished call. If there were any panics, update the call status
+// accordingly. Then, mark the call as done and report to the tracker.
+func (runner *suiteRunner) callDone(c *C) {
+ value := recover()
+ if value != nil {
+ switch v := value.(type) {
+ case *fixturePanic:
+ if v.status == skippedSt {
+ c.status = skippedSt
+ } else {
+ c.logSoftPanic("Fixture has panicked (see related PANIC)")
+ c.status = fixturePanickedSt
+ }
+ default:
+ c.logPanic(1, value)
+ c.status = panickedSt
+ }
+ }
+ if c.mustFail {
+ switch c.status {
+ case failedSt:
+ c.status = succeededSt
+ case succeededSt:
+ c.status = failedSt
+ c.logString("Error: Test succeeded, but was expected to fail")
+ c.logString("Reason: " + c.reason)
+ }
+ }
+
+ runner.reportCallDone(c)
+ c.done <- c
+}
+
+// Runs a fixture call synchronously. The fixture will still be run in a
+// goroutine like all suite methods, but this method will not return
+// until the fixture goroutine is done, because fixtures must run in a
+// deterministic order.
+func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
+ if method != nil {
+ c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
+ c.ResetTimer()
+ c.StartTimer()
+ defer c.StopTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ })
+ return c
+ }
+ return nil
+}
+
+// Run the fixture method with runFixture(), but panic with a fixturePanic{}
+// in case the fixture method panics. This makes it easier to track the
+// fixture panic together with other call panics within forkTest().
+func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
+ if skipped != nil && *skipped {
+ return nil
+ }
+ c := runner.runFixture(method, testName, logb)
+ if c != nil && c.status != succeededSt {
+ if skipped != nil {
+ *skipped = c.status == skippedSt
+ }
+ panic(&fixturePanic{c.status, method})
+ }
+ return c
+}
+
+type fixturePanic struct {
+ status funcStatus
+ method *methodType
+}
+
+// Run the suite test method, together with the test-specific fixture,
+// asynchronously.
+func (runner *suiteRunner) forkTest(method *methodType) *C {
+ testName := method.String()
+ return runner.forkCall(method, testKd, testName, nil, func(c *C) {
+ var skipped bool
+ defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
+ defer c.StopTimer()
+ benchN := 1
+ for {
+ runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
+ mt := c.method.Type()
+ if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
+ // Rather than a plain panic, provide a more helpful message when
+ // the argument type is incorrect.
+ c.status = panickedSt
+ c.logArgPanic(c.method, "*check.C")
+ return
+ }
+ if strings.HasPrefix(c.method.Info.Name, "Test") {
+ c.ResetTimer()
+ c.StartTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ return
+ }
+ if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
+ panic("unexpected method prefix: " + c.method.Info.Name)
+ }
+
+ runtime.GC()
+ c.N = benchN
+ c.ResetTimer()
+ c.StartTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ c.StopTimer()
+ if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
+ return
+ }
+ perOpN := int(1e9)
+ if c.nsPerOp() != 0 {
+ perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
+ }
+
+ // Logic taken from the stock testing package:
+ // - Run more iterations than we think we'll need for a second (1.5x).
+ // - Don't grow too fast in case we had timing errors previously.
+ // - Be sure to run at least one more than last time.
+ benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
+ benchN = roundUp(benchN)
+
+ skipped = true // Don't run the deferred one if this panics.
+ runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
+ skipped = false
+ }
+ })
+}
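+
+// Illustration of the growth logic above: if the first run (benchN=1) takes
+// 1ms against the default 1s benchTime, then perOpN = 1e9/1e6 = 1000 and the
+// next benchN = roundUp(max(min(1500, 100), 2)) = roundUp(100) = 200. The
+// iteration count keeps growing this way until a run lasts at least benchTime.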
+
+// Same as forkTest(), but wait for the test to finish before returning.
+func (runner *suiteRunner) runTest(method *methodType) *C {
+ c := runner.forkTest(method)
+ <-c.done
+ return c
+}
+
+// Helper to mark tests as skipped or missed. A bit heavy for what
+// it does, but it enables homogeneous handling of tracking, including
+// nice verbose output.
+func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
+ for _, method := range methods {
+ runner.runFunc(method, testKd, "", nil, func(c *C) {
+ c.status = status
+ })
+ }
+}
+
+// Verify that the fixture arguments are of type *check.C. In case of
+// errors, log the error as a panic in the fixture method call, and
+// return false.
+func (runner *suiteRunner) checkFixtureArgs() bool {
+ succeeded := true
+ argType := reflect.TypeOf(&C{})
+ for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
+ if method != nil {
+ mt := method.Type()
+ if mt.NumIn() != 1 || mt.In(0) != argType {
+ succeeded = false
+ runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
+ c.logArgPanic(method, "*check.C")
+ c.status = panickedSt
+ })
+ }
+ }
+ }
+ return succeeded
+}
+
+func (runner *suiteRunner) reportCallStarted(c *C) {
+ runner.output.WriteCallStarted("START", c)
+}
+
+func (runner *suiteRunner) reportCallDone(c *C) {
+ runner.tracker.callDone(c)
+ switch c.status {
+ case succeededSt:
+ if c.mustFail {
+ runner.output.WriteCallSuccess("FAIL EXPECTED", c)
+ } else {
+ runner.output.WriteCallSuccess("PASS", c)
+ }
+ case skippedSt:
+ runner.output.WriteCallSuccess("SKIP", c)
+ case failedSt:
+ runner.output.WriteCallProblem("FAIL", c)
+ case panickedSt:
+ runner.output.WriteCallProblem("PANIC", c)
+ case fixturePanickedSt:
+ // That's a testKd call reporting that its fixture
+ // has panicked. The fixture call which caused the
+ // panic itself was tracked above. We'll report to
+ // aid debugging.
+ runner.output.WriteCallProblem("PANIC", c)
+ case missedSt:
+ runner.output.WriteCallSuccess("MISS", c)
+ }
+}
+
+// -----------------------------------------------------------------------
+// Output writer manages atomic output writing according to settings.
+
+type outputWriter struct {
+ m sync.Mutex
+ writer io.Writer
+ wroteCallProblemLast bool
+ Stream bool
+ Verbose bool
+}
+
+func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
+ return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
+}
+
+func (ow *outputWriter) Write(content []byte) (n int, err error) {
+ ow.m.Lock()
+ n, err = ow.writer.Write(content)
+ ow.m.Unlock()
+ return
+}
+
+func (ow *outputWriter) WriteCallStarted(label string, c *C) {
+ if ow.Stream {
+ header := renderCallHeader(label, c, "", "\n")
+ ow.m.Lock()
+ ow.writer.Write([]byte(header))
+ ow.m.Unlock()
+ }
+}
+
+func (ow *outputWriter) WriteCallProblem(label string, c *C) {
+ var prefix string
+ if !ow.Stream {
+ prefix = "\n-----------------------------------" +
+ "-----------------------------------\n"
+ }
+ header := renderCallHeader(label, c, prefix, "\n\n")
+ ow.m.Lock()
+ ow.wroteCallProblemLast = true
+ ow.writer.Write([]byte(header))
+ if !ow.Stream {
+ c.logb.WriteTo(ow.writer)
+ }
+ ow.m.Unlock()
+}
+
+func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
+ if ow.Stream || (ow.Verbose && c.kind == testKd) {
+ // TODO Use a buffer here.
+ var suffix string
+ if c.reason != "" {
+ suffix = " (" + c.reason + ")"
+ }
+ if c.status == succeededSt {
+ suffix += "\t" + c.timerString()
+ }
+ suffix += "\n"
+ if ow.Stream {
+ suffix += "\n"
+ }
+ header := renderCallHeader(label, c, "", suffix)
+ ow.m.Lock()
+ // Resist the temptation to use the line as a prefix above due to a race.
+ if !ow.Stream && ow.wroteCallProblemLast {
+ header = "\n-----------------------------------" +
+ "-----------------------------------\n" +
+ header
+ }
+ ow.wroteCallProblemLast = false
+ ow.writer.Write([]byte(header))
+ ow.m.Unlock()
+ }
+}
+
+func renderCallHeader(label string, c *C, prefix, suffix string) string {
+ pc := c.method.PC()
+ return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
+ niceFuncName(pc), suffix)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go
new file mode 100644
index 00000000..871b3252
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go
@@ -0,0 +1,207 @@
+// This file contains just a few generic helpers which are used by the
+// other test files.
+
+package check_test
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "testing"
+ "time"
+
+ "gopkg.in/check.v1"
+)
+
+// We count the number of suites run to get at least a vague hint that the
+// test system is behaving as it should. Otherwise a bug introduced at the
+// very core of the system could go unnoticed.
+const suitesRunExpected = 8
+
+var suitesRun int = 0
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+ if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" {
+ critical(fmt.Sprintf("Expected %d suites to run rather than %d",
+ suitesRunExpected, suitesRun))
+ }
+}
+
+// -----------------------------------------------------------------------
+// Helper functions.
+
+// Break down badly. This is used in test cases which can't yet assume
+// that the fundamental bits are working.
+func critical(error string) {
+ fmt.Fprintln(os.Stderr, "CRITICAL: "+error)
+ os.Exit(1)
+}
+
+// getMyLine returns the source line number at which it is called.
+func getMyLine() int {
+ if _, _, line, ok := runtime.Caller(1); ok {
+ return line
+ }
+ return -1
+}
+
+// -----------------------------------------------------------------------
+// Helper type implementing a basic io.Writer for testing output.
+
+// String implements the io.Writer interface, accumulating the written
+// data in its value field for later inspection.
+type String struct {
+ value string
+}
+
+// Write is the only method required by the io.Writer interface. It
+// appends the written data to the String.value string.
+func (s *String) Write(p []byte) (n int, err error) {
+ s.value += string(p)
+ return len(p), nil
+}
+
+// Trivial wrapper to test errors happening in a different file
+// from the test itself.
+func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) {
+ return c.Check(obtained, check.Equals, expected), getMyLine()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic fail behavior.
+
+type FailHelper struct {
+ testLine int
+}
+
+func (s *FailHelper) TestLogAndFail(c *check.C) {
+ s.testLine = getMyLine() - 1
+ c.Log("Expected failure!")
+ c.Fail()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic success behavior.
+
+type SuccessHelper struct{}
+
+func (s *SuccessHelper) TestLogAndSucceed(c *check.C) {
+ c.Log("Expected success!")
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing ordering and behavior of fixture.
+
+type FixtureHelper struct {
+ calls []string
+ panicOn string
+ skip bool
+ skipOnN int
+ sleepOn string
+ sleep time.Duration
+ bytes int64
+}
+
+func (s *FixtureHelper) trace(name string, c *check.C) {
+ s.calls = append(s.calls, name)
+ if name == s.panicOn {
+ panic(name)
+ }
+ if s.sleep > 0 && s.sleepOn == name {
+ time.Sleep(s.sleep)
+ }
+ if s.skip && s.skipOnN == len(s.calls)-1 {
+ c.Skip("skipOnN == n")
+ }
+}
+
+func (s *FixtureHelper) SetUpSuite(c *check.C) {
+ s.trace("SetUpSuite", c)
+}
+
+func (s *FixtureHelper) TearDownSuite(c *check.C) {
+ s.trace("TearDownSuite", c)
+}
+
+func (s *FixtureHelper) SetUpTest(c *check.C) {
+ s.trace("SetUpTest", c)
+}
+
+func (s *FixtureHelper) TearDownTest(c *check.C) {
+ s.trace("TearDownTest", c)
+}
+
+func (s *FixtureHelper) Test1(c *check.C) {
+ s.trace("Test1", c)
+}
+
+func (s *FixtureHelper) Test2(c *check.C) {
+ s.trace("Test2", c)
+}
+
+func (s *FixtureHelper) Benchmark1(c *check.C) {
+ s.trace("Benchmark1", c)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ }
+}
+
+func (s *FixtureHelper) Benchmark2(c *check.C) {
+ s.trace("Benchmark2", c)
+ c.SetBytes(1024)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ }
+}
+
+func (s *FixtureHelper) Benchmark3(c *check.C) {
+ var x []int64
+ s.trace("Benchmark3", c)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ x = make([]int64, 5)
+ _ = x
+ }
+}
+
+// -----------------------------------------------------------------------
+// Helper which checks the state of the test and ensures that it matches
+// the given expectations. Depends on c.Errorf() working, so shouldn't
+// be used to test this one function.
+
+type expectedState struct {
+ name string
+ result interface{}
+ failed bool
+ log string
+}
+
+// Verify the state of the test. Note that since this also verifies
+// whether the test is in a failed state, no checks other than the one
+// being tested should be performed.
+func checkState(c *check.C, result interface{}, expected *expectedState) {
+ failed := c.Failed()
+ c.Succeed()
+ log := c.GetTestLog()
+ matched, matchError := regexp.MatchString("^"+expected.log+"$", log)
+ if matchError != nil {
+ c.Errorf("Error in matching expression used in testing %s",
+ expected.name)
+ } else if !matched {
+ c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------",
+ expected.name, log, expected.log)
+ }
+ if result != expected.result {
+ c.Errorf("%s returned %#v rather than %#v",
+ expected.name, result, expected.result)
+ }
+ if failed != expected.failed {
+ if failed {
+ c.Errorf("%s has failed when it shouldn't", expected.name)
+ } else {
+ c.Errorf("%s has not failed when it should", expected.name)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go b/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go
new file mode 100644
index 00000000..bac33872
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go
@@ -0,0 +1,458 @@
+package check
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+// -----------------------------------------------------------------------
+// CommentInterface and Commentf helper, to attach extra information to checks.
+
+type comment struct {
+ format string
+ args []interface{}
+}
+
+// Commentf returns an informational value to use with Assert or Check calls.
+// If the checker test fails, the provided arguments will be passed to
+// fmt.Sprintf, and will be presented next to the logged failure.
+//
+// For example:
+//
+// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
+//
+// Note that if the comment is constant, a better option is to
+// simply use a normal comment right above or next to the line, as
+// it will also get printed with any errors:
+//
+// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
+//
+func Commentf(format string, args ...interface{}) CommentInterface {
+ return &comment{format, args}
+}
+
+// CommentInterface must be implemented by types that attach extra
+// information to failed checks. See the Commentf function for details.
+type CommentInterface interface {
+ CheckCommentString() string
+}
+
+func (c *comment) CheckCommentString() string {
+ return fmt.Sprintf(c.format, c.args...)
+}
+
+// -----------------------------------------------------------------------
+// The Checker interface.
+
+// The Checker interface must be provided by checkers used with
+// the Assert and Check verification methods.
+type Checker interface {
+ Info() *CheckerInfo
+ Check(params []interface{}, names []string) (result bool, error string)
+}
+
+// See the Checker interface.
+type CheckerInfo struct {
+ Name string
+ Params []string
+}
+
+func (info *CheckerInfo) Info() *CheckerInfo {
+ return info
+}
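+
+// A sketch of a custom checker (hypothetical, for illustration). A checker
+// receives the parameter values alongside their names, and may return an
+// error string when the parameters themselves are unusable:
+//
+//	type isPositiveChecker struct{ *CheckerInfo }
+//
+//	var IsPositive Checker = &isPositiveChecker{
+//		&CheckerInfo{Name: "IsPositive", Params: []string{"value"}},
+//	}
+//
+//	func (checker *isPositiveChecker) Check(params []interface{}, names []string) (bool, string) {
+//		n, ok := params[0].(int)
+//		if !ok {
+//			return false, "value must be an int"
+//		}
+//		return n > 0, ""
+//	}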
+
+// -----------------------------------------------------------------------
+// Not checker logic inverter.
+
+// The Not checker inverts the logic of the provided checker. The
+// resulting checker will succeed where the original one failed, and
+// vice-versa.
+//
+// For example:
+//
+// c.Assert(a, Not(Equals), b)
+//
+func Not(checker Checker) Checker {
+ return &notChecker{checker}
+}
+
+type notChecker struct {
+ sub Checker
+}
+
+func (checker *notChecker) Info() *CheckerInfo {
+ info := *checker.sub.Info()
+ info.Name = "Not(" + info.Name + ")"
+ return &info
+}
+
+func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ result, error = checker.sub.Check(params, names)
+ result = !result
+ return
+}
+
+// -----------------------------------------------------------------------
+// IsNil checker.
+
+type isNilChecker struct {
+ *CheckerInfo
+}
+
+// The IsNil checker tests whether the obtained value is nil.
+//
+// For example:
+//
+// c.Assert(err, IsNil)
+//
+var IsNil Checker = &isNilChecker{
+ &CheckerInfo{Name: "IsNil", Params: []string{"value"}},
+}
+
+func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return isNil(params[0]), ""
+}
+
+func isNil(obtained interface{}) (result bool) {
+ if obtained == nil {
+ result = true
+ } else {
+ switch v := reflect.ValueOf(obtained); v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ }
+ return
+}
+
+// -----------------------------------------------------------------------
+// NotNil checker. Alias for Not(IsNil), since it's so common.
+
+type notNilChecker struct {
+ *CheckerInfo
+}
+
+// The NotNil checker verifies that the obtained value is not nil.
+//
+// For example:
+//
+// c.Assert(iface, NotNil)
+//
+// This is an alias for Not(IsNil), made available since it's a
+// fairly common check.
+//
+var NotNil Checker = &notNilChecker{
+ &CheckerInfo{Name: "NotNil", Params: []string{"value"}},
+}
+
+func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return !isNil(params[0]), ""
+}
+
+// -----------------------------------------------------------------------
+// Equals checker.
+
+type equalsChecker struct {
+ *CheckerInfo
+}
+
+// The Equals checker verifies that the obtained value is equal to
+// the expected value, according to usual Go semantics for ==.
+//
+// For example:
+//
+// c.Assert(value, Equals, 42)
+//
+var Equals Checker = &equalsChecker{
+ &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ defer func() {
+ if v := recover(); v != nil {
+ result = false
+ error = fmt.Sprint(v)
+ }
+ }()
+ return params[0] == params[1], ""
+}
+
+// -----------------------------------------------------------------------
+// DeepEquals checker.
+
+type deepEqualsChecker struct {
+ *CheckerInfo
+}
+
+// The DeepEquals checker verifies that the obtained value is deep-equal to
+// the expected value. The check will work correctly even when facing
+// slices, interfaces, and values of different types (which always fail
+// the test).
+//
+// For example:
+//
+// c.Assert(value, DeepEquals, 42)
+// c.Assert(array, DeepEquals, []string{"hi", "there"})
+//
+var DeepEquals Checker = &deepEqualsChecker{
+ &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return reflect.DeepEqual(params[0], params[1]), ""
+}
+
+// -----------------------------------------------------------------------
+// HasLen checker.
+
+type hasLenChecker struct {
+ *CheckerInfo
+}
+
+// The HasLen checker verifies that the obtained value has the
+// provided length. In many cases this is superior to using Equals
+// in conjunction with the len function because if the check
+// fails the value itself will be printed, instead of its length,
+// providing more details for figuring out the problem.
+//
+// For example:
+//
+// c.Assert(list, HasLen, 5)
+//
+var HasLen Checker = &hasLenChecker{
+ &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
+}
+
+func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ n, ok := params[1].(int)
+ if !ok {
+ return false, "n must be an int"
+ }
+ value := reflect.ValueOf(params[0])
+ switch value.Kind() {
+ case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
+ default:
+ return false, "obtained value type has no length"
+ }
+ return value.Len() == n, ""
+}
+
+// -----------------------------------------------------------------------
+// ErrorMatches checker.
+
+type errorMatchesChecker struct {
+ *CheckerInfo
+}
+
+// The ErrorMatches checker verifies that the error value
+// is non-nil and matches the regular expression provided.
+//
+// For example:
+//
+// c.Assert(err, ErrorMatches, "perm.*denied")
+//
+var ErrorMatches Checker = errorMatchesChecker{
+ &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
+}
+
+func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
+ if params[0] == nil {
+ return false, "Error value is nil"
+ }
+ err, ok := params[0].(error)
+ if !ok {
+ return false, "Value is not an error"
+ }
+ params[0] = err.Error()
+ names[0] = "error"
+ return matches(params[0], params[1])
+}
+
+// -----------------------------------------------------------------------
+// Matches checker.
+
+type matchesChecker struct {
+ *CheckerInfo
+}
+
+// The Matches checker verifies that the string provided as the obtained
+// value (or the string resulting from obtained.String()) matches the
+// regular expression provided.
+//
+// For example:
+//
+// c.Assert(err, Matches, "perm.*denied")
+//
+var Matches Checker = &matchesChecker{
+ &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
+}
+
+func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return matches(params[0], params[1])
+}
+
+func matches(value, regex interface{}) (result bool, error string) {
+ reStr, ok := regex.(string)
+ if !ok {
+ return false, "Regex must be a string"
+ }
+ valueStr, valueIsStr := value.(string)
+ if !valueIsStr {
+ if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
+ valueStr, valueIsStr = valueWithStr.String(), true
+ }
+ }
+ if valueIsStr {
+ matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
+ if err != nil {
+ return false, "Can't compile regex: " + err.Error()
+ }
+ return matches, ""
+ }
+ return false, "Obtained value is not a string and has no .String()"
+}
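+
+// Note the implicit anchoring above: a pattern must match the whole value.
+// For example, "ab" fails against "abc", while "ab.*" succeeds.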
+
+// -----------------------------------------------------------------------
+// Panics checker.
+
+type panicsChecker struct {
+ *CheckerInfo
+}
+
+// The Panics checker verifies that calling the provided zero-argument
+// function will cause a panic which is deep-equal to the provided value.
+//
+// For example:
+//
+// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"})
+//
+var Panics Checker = &panicsChecker{
+ &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ f := reflect.ValueOf(params[0])
+ if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+ return false, "Function must take zero arguments"
+ }
+ defer func() {
+ // If the function has not panicked, then don't do the check.
+ if error != "" {
+ return
+ }
+ params[0] = recover()
+ names[0] = "panic"
+ result = reflect.DeepEqual(params[0], params[1])
+ }()
+ f.Call(nil)
+ return false, "Function has not panicked"
+}
+
+type panicMatchesChecker struct {
+ *CheckerInfo
+}
+
+// The PanicMatches checker verifies that calling the provided zero-argument
+// function will cause a panic with an error value matching
+// the regular expression provided.
+//
+// For example:
+//
+// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`)
+//
+var PanicMatches Checker = &panicMatchesChecker{
+ &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
+ f := reflect.ValueOf(params[0])
+ if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+ return false, "Function must take zero arguments"
+ }
+ defer func() {
+ // If the function has not panicked, then don't do the check.
+ if errmsg != "" {
+ return
+ }
+ obtained := recover()
+ names[0] = "panic"
+ if e, ok := obtained.(error); ok {
+ params[0] = e.Error()
+ } else if _, ok := obtained.(string); ok {
+ params[0] = obtained
+ } else {
+ errmsg = "Panic value is not a string or an error"
+ return
+ }
+ result, errmsg = matches(params[0], params[1])
+ }()
+ f.Call(nil)
+ return false, "Function has not panicked"
+}
+
+// -----------------------------------------------------------------------
+// FitsTypeOf checker.
+
+type fitsTypeChecker struct {
+ *CheckerInfo
+}
+
+// The FitsTypeOf checker verifies that the obtained value is
+// assignable to a variable with the same type as the provided
+// sample value.
+//
+// For example:
+//
+// c.Assert(value, FitsTypeOf, int64(0))
+// c.Assert(value, FitsTypeOf, os.Error(nil))
+//
+var FitsTypeOf Checker = &fitsTypeChecker{
+ &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
+}
+
+func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ obtained := reflect.ValueOf(params[0])
+ sample := reflect.ValueOf(params[1])
+ if !obtained.IsValid() {
+ return false, ""
+ }
+ if !sample.IsValid() {
+ return false, "Invalid sample value"
+ }
+ return obtained.Type().AssignableTo(sample.Type()), ""
+}
+
+// -----------------------------------------------------------------------
+// Implements checker.
+
+type implementsChecker struct {
+ *CheckerInfo
+}
+
+// The Implements checker verifies that the obtained value
+// implements the interface specified via a pointer to an interface
+// variable.
+//
+// For example:
+//
+// var e os.Error
+// c.Assert(err, Implements, &e)
+//
+var Implements Checker = &implementsChecker{
+ &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
+}
+
+func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ obtained := reflect.ValueOf(params[0])
+ ifaceptr := reflect.ValueOf(params[1])
+ if !obtained.IsValid() {
+ return false, ""
+ }
+ if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
+ return false, "ifaceptr should be a pointer to an interface variable"
+ }
+ return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go
new file mode 100644
index 00000000..5c697474
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go
@@ -0,0 +1,272 @@
+package check_test
+
+import (
+ "errors"
+ "gopkg.in/check.v1"
+ "reflect"
+ "runtime"
+)
+
+type CheckersS struct{}
+
+var _ = check.Suite(&CheckersS{})
+
+func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) {
+ info := checker.Info()
+ if info.Name != name {
+ c.Fatalf("Got name %s, expected %s", info.Name, name)
+ }
+ if !reflect.DeepEqual(info.Params, paramNames) {
+ c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames)
+ }
+}
+
+func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) {
+ info := checker.Info()
+ if len(params) != len(info.Params) {
+ c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params))
+ }
+ names := append([]string{}, info.Params...)
+ result_, error_ := checker.Check(params, names)
+ if result_ != result || error_ != error {
+ c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)",
+ info.Name, params, result_, error_, result, error)
+ }
+ return params, names
+}
+
+func (s *CheckersS) TestComment(c *check.C) {
+ bug := check.Commentf("a %d bc", 42)
+ comment := bug.CheckCommentString()
+ if comment != "a 42 bc" {
+ c.Fatalf("Commentf returned %#v", comment)
+ }
+}
+
+func (s *CheckersS) TestIsNil(c *check.C) {
+ testInfo(c, check.IsNil, "IsNil", []string{"value"})
+
+ testCheck(c, check.IsNil, true, "", nil)
+ testCheck(c, check.IsNil, false, "", "a")
+
+ testCheck(c, check.IsNil, true, "", (chan int)(nil))
+ testCheck(c, check.IsNil, false, "", make(chan int))
+ testCheck(c, check.IsNil, true, "", (error)(nil))
+ testCheck(c, check.IsNil, false, "", errors.New(""))
+ testCheck(c, check.IsNil, true, "", ([]int)(nil))
+ testCheck(c, check.IsNil, false, "", make([]int, 1))
+ testCheck(c, check.IsNil, false, "", int(0))
+}
+
+func (s *CheckersS) TestNotNil(c *check.C) {
+ testInfo(c, check.NotNil, "NotNil", []string{"value"})
+
+ testCheck(c, check.NotNil, false, "", nil)
+ testCheck(c, check.NotNil, true, "", "a")
+
+ testCheck(c, check.NotNil, false, "", (chan int)(nil))
+ testCheck(c, check.NotNil, true, "", make(chan int))
+ testCheck(c, check.NotNil, false, "", (error)(nil))
+ testCheck(c, check.NotNil, true, "", errors.New(""))
+ testCheck(c, check.NotNil, false, "", ([]int)(nil))
+ testCheck(c, check.NotNil, true, "", make([]int, 1))
+}
+
+func (s *CheckersS) TestNot(c *check.C) {
+ testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"})
+
+ testCheck(c, check.Not(check.IsNil), false, "", nil)
+ testCheck(c, check.Not(check.IsNil), true, "", "a")
+}
+
+type simpleStruct struct {
+ i int
+}
+
+func (s *CheckersS) TestEquals(c *check.C) {
+ testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"})
+
+ // The simplest.
+ testCheck(c, check.Equals, true, "", 42, 42)
+ testCheck(c, check.Equals, false, "", 42, 43)
+
+ // Different native types.
+ testCheck(c, check.Equals, false, "", int32(42), int64(42))
+
+ // With nil.
+ testCheck(c, check.Equals, false, "", 42, nil)
+
+ // Slices
+ testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2})
+
+ // Struct values
+ testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1})
+ testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2})
+
+ // Struct pointers
+ testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1})
+ testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestDeepEquals(c *check.C) {
+ testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"})
+
+ // The simplest.
+ testCheck(c, check.DeepEquals, true, "", 42, 42)
+ testCheck(c, check.DeepEquals, false, "", 42, 43)
+
+ // Different native types.
+ testCheck(c, check.DeepEquals, false, "", int32(42), int64(42))
+
+ // With nil.
+ testCheck(c, check.DeepEquals, false, "", 42, nil)
+
+ // Slices
+ testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2})
+ testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3})
+
+ // Struct values
+ testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1})
+ testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2})
+
+ // Struct pointers
+ testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1})
+ testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestHasLen(c *check.C) {
+ testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"})
+
+ testCheck(c, check.HasLen, true, "", "abcd", 4)
+ testCheck(c, check.HasLen, true, "", []int{1, 2}, 2)
+ testCheck(c, check.HasLen, false, "", []int{1, 2}, 3)
+
+ testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2")
+ testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2)
+}
+
+func (s *CheckersS) TestErrorMatches(c *check.C) {
+ testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"})
+
+ testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error")
+ testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error")
+ testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error")
+ testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or")
+
+ // Verify params mutation
+ params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error")
+ c.Assert(params[0], check.Equals, "some error")
+ c.Assert(names[0], check.Equals, "error")
+}
+
+func (s *CheckersS) TestMatches(c *check.C) {
+ testInfo(c, check.Matches, "Matches", []string{"value", "regex"})
+
+ // Simple matching
+ testCheck(c, check.Matches, true, "", "abc", "abc")
+ testCheck(c, check.Matches, true, "", "abc", "a.c")
+
+ // Must match fully
+ testCheck(c, check.Matches, false, "", "abc", "ab")
+ testCheck(c, check.Matches, false, "", "abc", "bc")
+
+ // String()-enabled values accepted
+ testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c")
+ testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d")
+
+ // Some error conditions.
+ testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c")
+ testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c")
+}
+
+func (s *CheckersS) TestPanics(c *check.C) {
+ testInfo(c, check.Panics, "Panics", []string{"function", "expected"})
+
+ // Some errors.
+ testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM")
+ testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM")
+
+ // Plain strings.
+ testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM")
+ testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM")
+ testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM")
+
+ // Error values.
+ testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM"))
+ testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+
+ type deep struct{ i int }
+ // Deep value
+ testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99})
+
+ // Verify params/names mutation
+ params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+ c.Assert(params[0], check.ErrorMatches, "KABOOM")
+ c.Assert(names[0], check.Equals, "panic")
+
+ // Verify a nil panic
+ testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil)
+ testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE")
+}
+
+func (s *CheckersS) TestPanicMatches(c *check.C) {
+ testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"})
+
+ // Error matching.
+ testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M")
+ testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M")
+
+ // Some errors.
+ testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM")
+ testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM")
+
+ // Plain strings.
+ testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M")
+ testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM")
+ testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M")
+
+ // Verify params/names mutation
+ params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM")
+ c.Assert(params[0], check.Equals, "KABOOM")
+ c.Assert(names[0], check.Equals, "panic")
+
+ // Verify a nil panic
+ testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "")
+}
+
+func (s *CheckersS) TestFitsTypeOf(c *check.C) {
+ testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"})
+
+ // Basic types
+ testCheck(c, check.FitsTypeOf, true, "", 1, 0)
+ testCheck(c, check.FitsTypeOf, false, "", 1, int64(0))
+
+ // Aliases
+ testCheck(c, check.FitsTypeOf, false, "", 1, errors.New(""))
+ testCheck(c, check.FitsTypeOf, false, "", "error", errors.New(""))
+ testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New(""))
+
+ // Structures
+ testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{})
+ testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{})
+ testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{})
+ testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{})
+
+ // Some bad values
+ testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil))
+ testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0)
+}
+
+func (s *CheckersS) TestImplements(c *check.C) {
+ testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"})
+
+ var e error
+ var re runtime.Error
+ testCheck(c, check.Implements, true, "", errors.New(""), &e)
+ testCheck(c, check.Implements, false, "", errors.New(""), &re)
+
+ // Some bad values
+ testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New(""))
+ testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil))
+ testCheck(c, check.Implements, false, "", interface{}(nil), &e)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go
new file mode 100644
index 00000000..0e6cfe0f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go
@@ -0,0 +1,9 @@
+package check
+
+func PrintLine(filename string, line int) (string, error) {
+ return printLine(filename, line)
+}
+
+func Indent(s, with string) string {
+ return indent(s, with)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go
new file mode 100644
index 00000000..2bff9e16
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go
@@ -0,0 +1,484 @@
+// Tests for the behavior of the test fixture system.
+
+package check_test
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+// -----------------------------------------------------------------------
+// Fixture test suite.
+
+type FixtureS struct{}
+
+var fixtureS = Suite(&FixtureS{})
+
+func (s *FixtureS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Basic fixture ordering verification.
+
+func (s *FixtureS) TestOrder(c *C) {
+ helper := FixtureHelper{}
+ Run(&helper, nil)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+// -----------------------------------------------------------------------
+// Check the behavior when panics occur within tests and fixtures.
+
+func (s *FixtureS) TestPanicOnTest(c *C) {
+ helper := FixtureHelper{panicOn: "Test1"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" +
+ "\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.Test1\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpTest(c *C) {
+ helper := FixtureHelper{panicOn: "SetUpTest"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 4)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.SetUpTest\n" +
+ "(.|\n)*" +
+ "\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: Fixture has panicked " +
+ "\\(see related PANIC\\)\n$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownTest(c *C) {
+ helper := FixtureHelper{panicOn: "TearDownTest"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.TearDownTest\n\n" +
+ "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.TearDownTest\n" +
+ "(.|\n)*" +
+ "\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: Fixture has panicked " +
+ "\\(see related PANIC\\)\n$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpSuite(c *C) {
+ helper := FixtureHelper{panicOn: "SetUpSuite"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 2)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.SetUpSuite\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownSuite(c *C) {
+ helper := FixtureHelper{panicOn: "TearDownSuite"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.TearDownSuite\n\n" +
+ "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.TearDownSuite\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// A wrong argument on a test or fixture will produce a nice error.
+
+func (s *FixtureS) TestPanicOnWrongTestArg(c *C) {
+ helper := WrongTestArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "SetUpTest")
+ c.Check(helper.calls[4], Equals, "Test2")
+ c.Check(helper.calls[5], Equals, "TearDownTest")
+ c.Check(helper.calls[6], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 7)
+
+ expected := "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongTestArgHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) {
+ helper := WrongSetUpTestArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpTestArgHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) {
+ helper := WrongSetUpSuiteArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Nice errors also when tests or fixtures have the wrong argument count.
+
+func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) {
+ helper := WrongTestArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "SetUpTest")
+ c.Check(helper.calls[4], Equals, "Test2")
+ c.Check(helper.calls[5], Equals, "TearDownTest")
+ c.Check(helper.calls[6], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 7)
+
+ expected := "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongTestArgCountHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) {
+ helper := WrongSetUpTestArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) {
+ helper := WrongSetUpSuiteArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" +
+ "\\.SetUpSuite argument should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Helper test suites with wrong function arguments.
+
+type WrongTestArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongTestArgHelper) Test1(t int) {
+}
+
+type WrongSetUpTestArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpTestArgHelper) SetUpTest(t int) {
+}
+
+type WrongSetUpSuiteArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) {
+}
+
+type WrongTestArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongTestArgCountHelper) Test1(c *C, i int) {
+}
+
+type WrongSetUpTestArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) {
+}
+
+type WrongSetUpSuiteArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) {
+}
+
+// -----------------------------------------------------------------------
+// Ensure fixture doesn't run without tests.
+
+type NoTestsHelper struct {
+ hasRun bool
+}
+
+func (s *NoTestsHelper) SetUpSuite(c *C) {
+ s.hasRun = true
+}
+
+func (s *NoTestsHelper) TearDownSuite(c *C) {
+ s.hasRun = true
+}
+
+func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) {
+ helper := NoTestsHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.hasRun, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that checks and assertions work correctly inside the fixture.
+
+type FixtureCheckHelper struct {
+ fail string
+ completed bool
+}
+
+func (s *FixtureCheckHelper) SetUpSuite(c *C) {
+ switch s.fail {
+ case "SetUpSuiteAssert":
+ c.Assert(false, Equals, true)
+ case "SetUpSuiteCheck":
+ c.Check(false, Equals, true)
+ }
+ s.completed = true
+}
+
+func (s *FixtureCheckHelper) SetUpTest(c *C) {
+ switch s.fail {
+ case "SetUpTestAssert":
+ c.Assert(false, Equals, true)
+ case "SetUpTestCheck":
+ c.Check(false, Equals, true)
+ }
+ s.completed = true
+}
+
+func (s *FixtureCheckHelper) Test(c *C) {
+ // Do nothing.
+}
+
+func (s *FixtureS) TestSetUpSuiteCheck(c *C) {
+ helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureCheckHelper\\.SetUpSuite\n\n"+
+ "fixture_test\\.go:[0-9]+:\n"+
+ " c\\.Check\\(false, Equals, true\\)\n"+
+ "\\.+ obtained bool = false\n"+
+ "\\.+ expected bool = true\n\n")
+ c.Assert(helper.completed, Equals, true)
+}
+
+func (s *FixtureS) TestSetUpSuiteAssert(c *C) {
+ helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureCheckHelper\\.SetUpSuite\n\n"+
+ "fixture_test\\.go:[0-9]+:\n"+
+ " c\\.Assert\\(false, Equals, true\\)\n"+
+ "\\.+ obtained bool = false\n"+
+ "\\.+ expected bool = true\n\n")
+ c.Assert(helper.completed, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that logging from within SetUpTest() ends up in the test's own log.
+
+type FixtureLogHelper struct {
+ c *C
+}
+
+func (s *FixtureLogHelper) SetUpTest(c *C) {
+ s.c = c
+ c.Log("1")
+}
+
+func (s *FixtureLogHelper) Test(c *C) {
+ c.Log("2")
+ s.c.Log("3")
+ c.Log("4")
+ c.Fail()
+}
+
+func (s *FixtureLogHelper) TearDownTest(c *C) {
+ s.c.Log("5")
+}
+
+func (s *FixtureS) TestFixtureLogging(c *C) {
+ helper := FixtureLogHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureLogHelper\\.Test\n\n"+
+ "1\n2\n3\n4\n5\n")
+}
+
+// -----------------------------------------------------------------------
+// Skip() within fixture methods.
+
+func (s *FixtureS) TestSkipSuite(c *C) {
+ helper := FixtureHelper{skip: true, skipOnN: 0}
+ output := String{}
+ result := Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Equals, "")
+ c.Assert(helper.calls[0], Equals, "SetUpSuite")
+ c.Assert(helper.calls[1], Equals, "TearDownSuite")
+ c.Assert(len(helper.calls), Equals, 2)
+ c.Assert(result.Skipped, Equals, 2)
+}
+
+func (s *FixtureS) TestSkipTest(c *C) {
+ helper := FixtureHelper{skip: true, skipOnN: 1}
+ output := String{}
+ result := Run(&helper, &RunConf{Output: &output})
+ c.Assert(helper.calls[0], Equals, "SetUpSuite")
+ c.Assert(helper.calls[1], Equals, "SetUpTest")
+ c.Assert(helper.calls[2], Equals, "SetUpTest")
+ c.Assert(helper.calls[3], Equals, "Test2")
+ c.Assert(helper.calls[4], Equals, "TearDownTest")
+ c.Assert(helper.calls[5], Equals, "TearDownSuite")
+ c.Assert(len(helper.calls), Equals, 6)
+ c.Assert(result.Skipped, Equals, 1)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go
new file mode 100644
index 00000000..8ecf7915
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go
@@ -0,0 +1,335 @@
+// These tests check that the foundations of gocheck are working properly.
+// They assume that fundamental failure reporting already works, since
+// this was tested in bootstrap_test.go. Even then, some care may still
+// have to be taken when using external functions, since they should
+// of course not rely on functionality tested here.
+
+package check_test
+
+import (
+ "fmt"
+ "gopkg.in/check.v1"
+ "log"
+ "os"
+ "regexp"
+ "strings"
+)
+
+// -----------------------------------------------------------------------
+// Foundation test suite.
+
+type FoundationS struct{}
+
+var foundationS = check.Suite(&FoundationS{})
+
+func (s *FoundationS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *FoundationS) TestErrorf(c *check.C) {
+ // Do not use checkState() here. It depends on Errorf() working.
+ expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Errorf(\"Error %%v!\", \"message\")\n"+
+ "... Error: Error message!\n\n",
+ getMyLine()+1)
+ c.Errorf("Error %v!", "message")
+ failed := c.Failed()
+ c.Succeed()
+ if log := c.GetTestLog(); log != expectedLog {
+ c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog)
+ c.Fail()
+ }
+ if !failed {
+ c.Logf("Errorf() didn't put the test in a failed state")
+ c.Fail()
+ }
+}
+
+func (s *FoundationS) TestError(c *check.C) {
+ expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c\\.Error\\(\"Error \", \"message!\"\\)\n"+
+ "\\.\\.\\. Error: Error message!\n\n",
+ getMyLine()+1)
+ c.Error("Error ", "message!")
+ checkState(c, nil,
+ &expectedState{
+ name: "Error(`Error `, `message!`)",
+ failed: true,
+ log: expectedLog,
+ })
+}
+
+func (s *FoundationS) TestFailNow(c *check.C) {
+ defer (func() {
+ if !c.Failed() {
+ c.Error("FailNow() didn't fail the test")
+ } else {
+ c.Succeed()
+ if c.GetTestLog() != "" {
+ c.Error("Something got logged:\n" + c.GetTestLog())
+ }
+ }
+ })()
+
+ c.FailNow()
+ c.Log("FailNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestSucceedNow(c *check.C) {
+ defer (func() {
+ if c.Failed() {
+ c.Error("SucceedNow() didn't succeed the test")
+ }
+ if c.GetTestLog() != "" {
+ c.Error("Something got logged:\n" + c.GetTestLog())
+ }
+ })()
+
+ c.Fail()
+ c.SucceedNow()
+ c.Log("SucceedNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestFailureHeader(c *check.C) {
+ output := String{}
+ failHelper := FailHelper{}
+ check.Run(&failHelper, &check.RunConf{Output: &output})
+ header := fmt.Sprintf(""+
+ "\n-----------------------------------"+
+ "-----------------------------------\n"+
+ "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n",
+ failHelper.testLine)
+ if strings.Index(output.value, header) == -1 {
+ c.Errorf(""+
+ "Failure didn't print a proper header.\n"+
+ "... Got:\n%s... Expected something with:\n%s",
+ output.value, header)
+ }
+}
+
+func (s *FoundationS) TestFatal(c *check.C) {
+ var line int
+ defer (func() {
+ if !c.Failed() {
+ c.Error("Fatal() didn't fail the test")
+ } else {
+ c.Succeed()
+ expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Fatal(\"Die \", \"now!\")\n"+
+ "... Error: Die now!\n\n",
+ line)
+ if c.GetTestLog() != expected {
+ c.Error("Incorrect log:", c.GetTestLog())
+ }
+ }
+ })()
+
+ line = getMyLine() + 1
+ c.Fatal("Die ", "now!")
+ c.Log("Fatal() didn't stop the test")
+}
+
+func (s *FoundationS) TestFatalf(c *check.C) {
+ var line int
+ defer (func() {
+ if !c.Failed() {
+ c.Error("Fatalf() didn't fail the test")
+ } else {
+ c.Succeed()
+ expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Fatalf(\"Die %%s!\", \"now\")\n"+
+ "... Error: Die now!\n\n",
+ line)
+ if c.GetTestLog() != expected {
+ c.Error("Incorrect log:", c.GetTestLog())
+ }
+ }
+ })()
+
+ line = getMyLine() + 1
+ c.Fatalf("Die %s!", "now")
+ c.Log("Fatalf() didn't stop the test")
+}
+
+func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) {
+ log := fmt.Sprintf(""+
+ "foundation_test.go:%d:\n"+
+ " result := c.Check\\(10, check.Equals, 20\\)\n"+
+ "\\.\\.\\. obtained int = 10\n"+
+ "\\.\\.\\. expected int = 20\n\n",
+ getMyLine()+1)
+ result := c.Check(10, check.Equals, 20)
+ checkState(c, result,
+ &expectedState{
+ name: "Check(10, Equals, 20)",
+ result: false,
+ failed: true,
+ log: log,
+ })
+}
+
+func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) {
+ result, line := checkEqualWrapper(c, 10, 20)
+ testLine := getMyLine() - 1
+ log := fmt.Sprintf(""+
+ "foundation_test.go:%d:\n"+
+ " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+
+ "check_test.go:%d:\n"+
+ " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+
+ "\\.\\.\\. obtained int = 10\n"+
+ "\\.\\.\\. expected int = 20\n\n",
+ testLine, line)
+ checkState(c, result,
+ &expectedState{
+ name: "Check(10, Equals, 20)",
+ result: false,
+ failed: true,
+ log: log,
+ })
+}
+
+// -----------------------------------------------------------------------
+// ExpectFailure() inverts the logic of failure.
+
+type ExpectFailureSucceedHelper struct{}
+
+func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) {
+ c.ExpectFailure("It booms!")
+ c.Error("Boom!")
+}
+
+type ExpectFailureFailHelper struct{}
+
+func (s *ExpectFailureFailHelper) TestFail(c *check.C) {
+ c.ExpectFailure("Bug #XYZ")
+}
+
+func (s *FoundationS) TestExpectFailureFail(c *check.C) {
+ helper := ExpectFailureFailHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output})
+
+ expected := "" +
+ "^\n-+\n" +
+ "FAIL: foundation_test\\.go:[0-9]+:" +
+ " ExpectFailureFailHelper\\.TestFail\n\n" +
+ "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" +
+ "\\.\\.\\. Reason: Bug #XYZ\n$"
+
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("ExpectFailure() didn't log properly:\n", output.value)
+ }
+
+ c.Assert(result.ExpectedFailures, check.Equals, 0)
+}
+
+func (s *FoundationS) TestExpectFailureSucceed(c *check.C) {
+ helper := ExpectFailureSucceedHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output})
+
+ c.Assert(output.value, check.Equals, "")
+ c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) {
+ helper := ExpectFailureSucceedHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+ expected := "" +
+ "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
+ " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n"
+
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("ExpectFailure() didn't log properly:\n", output.value)
+ }
+
+ c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+// -----------------------------------------------------------------------
+// Skip() allows stopping a test without positive/negative results.
+
+type SkipTestHelper struct{}
+
+func (s *SkipTestHelper) TestFail(c *check.C) {
+ c.Skip("Wrong platform or whatever")
+ c.Error("Boom!")
+}
+
+func (s *FoundationS) TestSkip(c *check.C) {
+ helper := SkipTestHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+
+ if output.value != "" {
+ c.Error("Skip() logged something:\n", output.value)
+ }
+}
+
+func (s *FoundationS) TestSkipVerbose(c *check.C) {
+ helper := SkipTestHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+ expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" +
+ " \\(Wrong platform or whatever\\)"
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("Skip() didn't log properly:\n", output.value)
+ }
+}
+
+// -----------------------------------------------------------------------
+// Check minimum *log.Logger interface provided by *check.C.
+
+type minLogger interface {
+ Output(calldepth int, s string) error
+}
+
+func (s *BootstrapS) TestMinLogger(c *check.C) {
+ var logger minLogger
+ logger = log.New(os.Stderr, "", 0)
+ logger = c
+ logger.Output(0, "Hello there")
+ expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n`
+ output := c.GetTestLog()
+ c.Assert(output, check.Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Ensure that suites with embedded types are working fine, including the
+// workaround for issue 906.
+
+type EmbeddedInternalS struct {
+ called bool
+}
+
+type EmbeddedS struct {
+ EmbeddedInternalS
+}
+
+var embeddedS = check.Suite(&EmbeddedS{})
+
+func (s *EmbeddedS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *EmbeddedInternalS) TestMethod(c *check.C) {
+ c.Error("TestMethod() of the embedded type was called!?")
+}
+
+func (s *EmbeddedS) TestMethod(c *check.C) {
+ // http://code.google.com/p/go/issues/detail?id=906
+ c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner?
+ s.called = true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go b/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go
new file mode 100644
index 00000000..4b6c26da
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go
@@ -0,0 +1,231 @@
+package check
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// TestName returns the current test name in the form "SuiteName.TestName".
+func (c *C) TestName() string {
+ return c.testName
+}
+
+// -----------------------------------------------------------------------
+// Basic succeeding/failing logic.
+
+// Failed returns whether the currently running test has already failed.
+func (c *C) Failed() bool {
+ return c.status == failedSt
+}
+
+// Fail marks the currently running test as failed.
+//
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) Fail() {
+ c.status = failedSt
+}
+
+// FailNow marks the currently running test as failed and stops running it.
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) FailNow() {
+ c.Fail()
+ c.stopNow()
+}
+
+// Succeed marks the currently running test as succeeded, undoing any
+// previous failures.
+func (c *C) Succeed() {
+ c.status = succeededSt
+}
+
+// SucceedNow marks the currently running test as succeeded, undoing any
+// previous failures, and stops running the test.
+func (c *C) SucceedNow() {
+ c.Succeed()
+ c.stopNow()
+}
+
+// ExpectFailure records that the running test is knowingly broken for
+// the provided reason. If the test does not fail, an error will be reported
+// to raise attention to this fact. This method is useful to temporarily
+// disable tests which cover well known problems until a better time to
+// fix the problem is found, without forgetting about the fact that a
+// failure still exists.
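+//
+// An illustrative use (hypothetical suite, function, and bug number):
+//
+//	func (s *MySuite) TestFrobnicate(c *C) {
+//		c.ExpectFailure("Bug #123: frobnicate is knowingly broken")
+//		c.Assert(frobnicate(), Equals, 42)
+//	}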
+func (c *C) ExpectFailure(reason string) {
+ if reason == "" {
+ panic("Missing reason why the test is expected to fail")
+ }
+ c.mustFail = true
+ c.reason = reason
+}
+
+// Skip skips the running test for the provided reason. If run from within
+// SetUpTest, the individual test being set up will be skipped, and if run
+// from within SetUpSuite, the whole suite is skipped.
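+//
+// A minimal sketch (hypothetical suite and platform check):
+//
+//	func (s *MySuite) SetUpSuite(c *C) {
+//		if runtime.GOOS == "windows" {
+//			c.Skip("not supported on windows")
+//		}
+//	}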
+func (c *C) Skip(reason string) {
+ if reason == "" {
+ panic("Missing reason why the test is being skipped")
+ }
+ c.reason = reason
+ c.status = skippedSt
+ c.stopNow()
+}
+
+// -----------------------------------------------------------------------
+// Basic logging.
+
+// GetTestLog returns the current test error output.
+func (c *C) GetTestLog() string {
+ return c.logb.String()
+}
+
+// Log logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Log(args ...interface{}) {
+ c.log(args...)
+}
+
+// Logf logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Logf(format string, args ...interface{}) {
+ c.logf(format, args...)
+}
+
+// Output enables *C to be used as a logger in functions that require only
+// the minimum interface of *log.Logger.
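+//
+// This lets a *C stand in wherever only that minimal interface is needed;
+// a sketch inside a test method, mirroring the TestMinLogger test:
+//
+//	var logger interface {
+//		Output(calldepth int, s string) error
+//	} = c
+//	logger.Output(0, "Hello there")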
+func (c *C) Output(calldepth int, s string) error {
+ d := time.Now().Sub(c.startTime)
+ msec := d / time.Millisecond
+ sec := d / time.Second
+ min := d / time.Minute
+
+ c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
+ return nil
+}
+
+// Error logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Error(args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+ c.logNewLine()
+ c.Fail()
+}
+
+// Errorf logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Errorf(format string, args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprintf("Error: "+format, args...))
+ c.logNewLine()
+ c.Fail()
+}
+
+// Fatal logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprint.
+func (c *C) Fatal(args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+ c.logNewLine()
+ c.FailNow()
+}
+
+// Fatalf logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprintf.
+func (c *C) Fatalf(format string, args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
+ c.logNewLine()
+ c.FailNow()
+}
+
+// -----------------------------------------------------------------------
+// Generic checks and assertions based on checkers.
+
+// Check verifies if the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution continues.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
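+//
+// For example, with the stock Equals checker and an optional comment
+// (resp and body are hypothetical values from the code under test):
+//
+//	c.Check(resp.StatusCode, Equals, 200, Commentf("body: %q", body))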
+func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
+ return c.internalCheck("Check", obtained, checker, args...)
+}
+
+// Assert ensures that the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution stops.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
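+//
+// For example (err and count stand for values produced by the code
+// under test):
+//
+//	c.Assert(err, IsNil)
+//	c.Assert(count, Equals, 10)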
+func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
+ if !c.internalCheck("Assert", obtained, checker, args...) {
+ c.stopNow()
+ }
+}
+
+func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
+ if checker == nil {
+ c.logCaller(2)
+ c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
+ c.logString("Oops.. you've provided a nil checker!")
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+
+ // If the last argument is a bug info, extract it out.
+ var comment CommentInterface
+ if len(args) > 0 {
+ if c, ok := args[len(args)-1].(CommentInterface); ok {
+ comment = c
+ args = args[:len(args)-1]
+ }
+ }
+
+ params := append([]interface{}{obtained}, args...)
+ info := checker.Info()
+
+ if len(params) != len(info.Params) {
+ names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
+ c.logCaller(2)
+ c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
+ c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+
+ // Copy since it may be mutated by Check.
+ names := append([]string{}, info.Params...)
+
+ // Do the actual check.
+ result, error := checker.Check(params, names)
+ if !result || error != "" {
+ c.logCaller(2)
+ for i := 0; i != len(params); i++ {
+ c.logValue(names[i], params[i])
+ }
+ if comment != nil {
+ c.logString(comment.CheckCommentString())
+ }
+ if error != "" {
+ c.logString(error)
+ }
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go
new file mode 100644
index 00000000..4baa656b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go
@@ -0,0 +1,519 @@
+// These tests verify the inner workings of the helper methods associated
+// with check.C.
+
+package check_test
+
+import (
+ "gopkg.in/check.v1"
+ "os"
+ "reflect"
+ "runtime"
+ "sync"
+)
+
+var helpersS = check.Suite(&HelpersS{})
+
+type HelpersS struct{}
+
+func (s *HelpersS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Fake checker and bug info to verify the behavior of Assert() and Check().
+
+type MyChecker struct {
+ info *check.CheckerInfo
+ params []interface{}
+ names []string
+ result bool
+ error string
+}
+
+func (checker *MyChecker) Info() *check.CheckerInfo {
+ if checker.info == nil {
+ return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}}
+ }
+ return checker.info
+}
+
+func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) {
+ rparams := checker.params
+ rnames := checker.names
+ checker.params = append([]interface{}{}, params...)
+ checker.names = append([]string{}, names...)
+ if rparams != nil {
+ copy(params, rparams)
+ }
+ if rnames != nil {
+ copy(names, rnames)
+ }
+ return checker.result, checker.error
+}
+
+type myCommentType string
+
+func (c myCommentType) CheckCommentString() string {
+ return string(c)
+}
+
+func myComment(s string) myCommentType {
+ return myCommentType(s)
+}
+
+// -----------------------------------------------------------------------
+// Ensure a real checker actually works fine.
+
+func (s *HelpersS) TestCheckerInterface(c *check.C) {
+ testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} {
+ return c.Check(1, check.Equals, 1)
+ })
+}
+
+// -----------------------------------------------------------------------
+// Tests for Check(), mostly mirrored by the Assert() tests further below.
+
+func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ testHelperSuccess(c, "Check(1, checker)", true, func() interface{} {
+ return c.Check(1, checker)
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestCheckFailWithExpected(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2, myComment("Hello world!"))
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " // Nice leading comment\\.\n" +
+ " return c\\.Check\\(1, checker, 2\\) // Hello there\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+ func() interface{} {
+ // Nice leading comment.
+ return c.Check(1, checker, 2) // Hello there
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker\\)\n" +
+ "\\.+ myvalue int = 1\n\n"
+ testHelperFailure(c, "Check(1, checker)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker)
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myvalue int = 1\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Check(1, checker, msg)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, myComment("Hello world!"))
+ })
+}
+
+func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker\\)\n" +
+ "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 2\n\n"
+ testHelperFailure(c, "Check(1, checker, !?)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker)
+ })
+}
+
+func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2, 3\\)\n" +
+ "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 4\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2, 3)
+ })
+}
+
+func (s *HelpersS) TestCheckWithError(c *check.C) {
+ checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Some not so cool data provided!\n\n"
+ testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+func (s *HelpersS) TestCheckWithNilChecker(c *check.C) {
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, nil\\)\n" +
+ "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+ "\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+ testHelperFailure(c, "Check(obtained, nil)", false, false, log,
+ func() interface{} {
+ return c.Check(1, nil)
+ })
+}
+
+func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) {
+ checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ newobtained int = 3\n" +
+ "\\.+ newexpected int = 4\n\n"
+ testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+// -----------------------------------------------------------------------
+// Tests for Assert(), mostly the same as for Check() above.
+
+func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestAssertFailWithExpected(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2, myComment("Hello world!"))
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker\\)\n" +
+ "\\.+ myvalue int = 1\n\n"
+ testHelperFailure(c, "Assert(1, checker)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myvalue int = 1\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, myComment("Hello world!"))
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker\\)\n" +
+ "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 2\n\n"
+ testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithError(c *check.C) {
+ checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Some not so cool data provided!\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithNilChecker(c *check.C) {
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, nil\\)\n" +
+ "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+ "\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+ testHelperFailure(c, "Assert(obtained, nil)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, nil)
+ return nil
+ })
+}
+
+// -----------------------------------------------------------------------
+// Ensure that values logged work properly in some interesting cases.
+
+func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" +
+ "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" +
+ "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n"
+ testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log,
+ func() interface{} {
+ return c.Check([]byte{1, 2}, checker, []byte{1, 3})
+ })
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" +
+ "\\.+ myobtained string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\\\\n\"\n" +
+ "\\.+ myexpected string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\\\\n\" \\+\n" +
+ "\\.+ \"c\"\n\n"
+ testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log,
+ func() interface{} {
+ return c.Check("a\nb\n", checker, "a\nb\nc")
+ })
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) {
+ // If the newline is at the end of the string, don't log as multi-line.
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" +
+ "\\.+ myobtained string = \"a b\\\\n\"\n" +
+ "\\.+ myexpected string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\"\n\n"
+ testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log,
+ func() interface{} {
+ return c.Check("a b\n", checker, "a\nb")
+ })
+}
+
+// -----------------------------------------------------------------------
+// MakeDir() tests.
+
+type MkDirHelper struct {
+ path1 string
+ path2 string
+ isDir1 bool
+ isDir2 bool
+ isDir3 bool
+ isDir4 bool
+}
+
+func (s *MkDirHelper) SetUpSuite(c *check.C) {
+ s.path1 = c.MkDir()
+ s.isDir1 = isDir(s.path1)
+}
+
+func (s *MkDirHelper) Test(c *check.C) {
+ s.path2 = c.MkDir()
+ s.isDir2 = isDir(s.path2)
+}
+
+func (s *MkDirHelper) TearDownSuite(c *check.C) {
+ s.isDir3 = isDir(s.path1)
+ s.isDir4 = isDir(s.path2)
+}
+
+func (s *HelpersS) TestMkDir(c *check.C) {
+ helper := MkDirHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+ c.Assert(output.value, check.Equals, "")
+ c.Check(helper.isDir1, check.Equals, true)
+ c.Check(helper.isDir2, check.Equals, true)
+ c.Check(helper.isDir3, check.Equals, true)
+ c.Check(helper.isDir4, check.Equals, true)
+ c.Check(helper.path1, check.Not(check.Equals),
+ helper.path2)
+ c.Check(isDir(helper.path1), check.Equals, false)
+ c.Check(isDir(helper.path2), check.Equals, false)
+}
+
+func isDir(path string) bool {
+ if stat, err := os.Stat(path); err == nil {
+ return stat.IsDir()
+ }
+ return false
+}
+
+// Concurrent logging should not corrupt the underlying buffer.
+// Use go test -race to detect the race in this test.
+func (s *HelpersS) TestConcurrentLogging(c *check.C) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+ var start, stop sync.WaitGroup
+ start.Add(1)
+ for i, n := 0, runtime.NumCPU()*2; i < n; i++ {
+ stop.Add(1)
+ go func(i int) {
+ start.Wait()
+ for j := 0; j < 30; j++ {
+ c.Logf("Worker %d: line %d", i, j)
+ }
+ stop.Done()
+ }(i)
+ }
+ start.Done()
+ stop.Wait()
+}
+
+// -----------------------------------------------------------------------
+// Test the TestName function
+
+type TestNameHelper struct {
+ name1 string
+ name2 string
+ name3 string
+ name4 string
+ name5 string
+}
+
+func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() }
+func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() }
+func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() }
+func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() }
+func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() }
+
+func (s *HelpersS) TestTestName(c *check.C) {
+ helper := TestNameHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+ c.Check(helper.name1, check.Equals, "")
+ c.Check(helper.name2, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name3, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name4, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name5, check.Equals, "")
+}
+
+// -----------------------------------------------------------------------
+// A couple of helper functions to test helper functions. :-)
+
+func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) {
+ var result interface{}
+ defer (func() {
+ if err := recover(); err != nil {
+ panic(err)
+ }
+ checkState(c, result,
+ &expectedState{
+ name: name,
+ result: expectedResult,
+ failed: false,
+ log: "",
+ })
+ })()
+ result = closure()
+}
+
+func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) {
+ var result interface{}
+ defer (func() {
+ if err := recover(); err != nil {
+ panic(err)
+ }
+ checkState(c, result,
+ &expectedState{
+ name: name,
+ result: expectedResult,
+ failed: true,
+ log: log,
+ })
+ })()
+ result = closure()
+ if shouldStop {
+ c.Logf("%s didn't stop when it should", name)
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/printer.go b/Godeps/_workspace/src/gopkg.in/check.v1/printer.go
new file mode 100644
index 00000000..e0f7557b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/printer.go
@@ -0,0 +1,168 @@
+package check
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "os"
+)
+
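+// indent prefixes each line of s with the given string, leaving empty
+// lines untouched; for instance indent("a\nb", ">>>") yields
+// ">>>a\n>>>b" (see the indentTests table in printer_test.go).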
+func indent(s, with string) (r string) {
+ eol := true
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ switch {
+ case eol && c == '\n' || c == '\r':
+ case c == '\n' || c == '\r':
+ eol = true
+ case eol:
+ eol = false
+ s = s[:i] + with + s[i:]
+ i += len(with)
+ }
+ }
+ return s
+}
+
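+// printLine parses the Go file at filename and pretty-prints the innermost
+// statement covering the given line, eliding enclosing blocks with "...";
+// the printLineTests table in printer_test.go shows concrete pairs.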
+func printLine(filename string, line int) (string, error) {
+ fset := token.NewFileSet()
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
+ if err != nil {
+ return "", err
+ }
+ config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
+ lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
+ ast.Walk(lp, fnode)
+ result := lp.output.Bytes()
+ // Comments leave \n at the end.
+ n := len(result)
+ for n > 0 && result[n-1] == '\n' {
+ n--
+ }
+ return string(result[:n]), nil
+}
+
+type linePrinter struct {
+ config *printer.Config
+ fset *token.FileSet
+ fnode *ast.File
+ line int
+ output bytes.Buffer
+ stmt ast.Stmt
+}
+
+func (lp *linePrinter) emit() bool {
+ if lp.stmt != nil {
+ lp.trim(lp.stmt)
+ lp.printWithComments(lp.stmt)
+ lp.stmt = nil
+ return true
+ }
+ return false
+}
+
+func (lp *linePrinter) printWithComments(n ast.Node) {
+ nfirst := lp.fset.Position(n.Pos()).Line
+ nlast := lp.fset.Position(n.End()).Line
+ for _, g := range lp.fnode.Comments {
+ cfirst := lp.fset.Position(g.Pos()).Line
+ clast := lp.fset.Position(g.End()).Line
+ if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
+ for _, c := range g.List {
+ lp.output.WriteString(c.Text)
+ lp.output.WriteByte('\n')
+ }
+ }
+ if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
+ // The printer will not include the comment if it starts past
+ // the node itself. Trick it into printing by overlapping the
+ // slash with the end of the statement.
+ g.List[0].Slash = n.End() - 1
+ }
+ }
+ node := &printer.CommentedNode{n, lp.fnode.Comments}
+ lp.config.Fprint(&lp.output, lp.fset, node)
+}
+
+func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
+ if n == nil {
+ if lp.output.Len() == 0 {
+ lp.emit()
+ }
+ return nil
+ }
+ first := lp.fset.Position(n.Pos()).Line
+ last := lp.fset.Position(n.End()).Line
+ if first <= lp.line && last >= lp.line {
+ // Print the innermost statement containing the line.
+ if stmt, ok := n.(ast.Stmt); ok {
+ if _, ok := n.(*ast.BlockStmt); !ok {
+ lp.stmt = stmt
+ }
+ }
+ if first == lp.line && lp.emit() {
+ return nil
+ }
+ return lp
+ }
+ return nil
+}
+
+func (lp *linePrinter) trim(n ast.Node) bool {
+ stmt, ok := n.(ast.Stmt)
+ if !ok {
+ return true
+ }
+ line := lp.fset.Position(n.Pos()).Line
+ if line != lp.line {
+ return false
+ }
+ switch stmt := stmt.(type) {
+ case *ast.IfStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.SwitchStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.TypeSwitchStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.CaseClause:
+ stmt.Body = lp.trimList(stmt.Body)
+ case *ast.CommClause:
+ stmt.Body = lp.trimList(stmt.Body)
+ case *ast.BlockStmt:
+ stmt.List = lp.trimList(stmt.List)
+ }
+ return true
+}
+
+func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
+ if !lp.trim(stmt) {
+ return lp.emptyBlock(stmt)
+ }
+ stmt.Rbrace = stmt.Lbrace
+ return stmt
+}
+
+func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
+ for i := 0; i != len(stmts); i++ {
+ if !lp.trim(stmts[i]) {
+ stmts[i] = lp.emptyStmt(stmts[i])
+ break
+ }
+ }
+ return stmts
+}
+
+func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
+ return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
+}
+
+func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
+ p := n.Pos()
+ return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go
new file mode 100644
index 00000000..538b2d52
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go
@@ -0,0 +1,104 @@
+package check_test
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&PrinterS{})
+
+type PrinterS struct{}
+
+func (s *PrinterS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+var printTestFuncLine int
+
+func init() {
+ printTestFuncLine = getMyLine() + 3
+}
+
+func printTestFunc() {
+ println(1) // Comment1
+ if 2 == 2 { // Comment2
+ println(3) // Comment3
+ }
+ switch 5 {
+ case 6: println(6) // Comment6
+ println(7)
+ }
+ switch interface{}(9).(type) { // Comment9
+ case int: println(10)
+ println(11)
+ }
+ select {
+ case <-(chan bool)(nil): println(14)
+ println(15)
+ default: println(16)
+ println(17)
+ }
+ println(19,
+ 20)
+ _ = func() { println(21)
+ println(22)
+ }
+ println(24, func() {
+ println(25)
+ })
+ // Leading comment
+ // with multiple lines.
+ println(29) // Comment29
+}
+
+var printLineTests = []struct {
+ line int
+ output string
+}{
+ {1, "println(1) // Comment1"},
+ {2, "if 2 == 2 { // Comment2\n ...\n}"},
+ {3, "println(3) // Comment3"},
+ {5, "switch 5 {\n...\n}"},
+ {6, "case 6:\n println(6) // Comment6\n ..."},
+ {7, "println(7)"},
+ {9, "switch interface{}(9).(type) { // Comment9\n...\n}"},
+ {10, "case int:\n println(10)\n ..."},
+ {14, "case <-(chan bool)(nil):\n println(14)\n ..."},
+ {15, "println(15)"},
+ {16, "default:\n println(16)\n ..."},
+ {17, "println(17)"},
+ {19, "println(19,\n 20)"},
+ {20, "println(19,\n 20)"},
+ {21, "_ = func() {\n println(21)\n println(22)\n}"},
+ {22, "println(22)"},
+ {24, "println(24, func() {\n println(25)\n})"},
+ {25, "println(25)"},
+ {26, "println(24, func() {\n println(25)\n})"},
+ {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"},
+}
+
+func (s *PrinterS) TestPrintLine(c *C) {
+ for _, test := range printLineTests {
+ output, err := PrintLine("printer_test.go", printTestFuncLine+test.line)
+ c.Assert(err, IsNil)
+ c.Assert(output, Equals, test.output)
+ }
+}
+
+var indentTests = []struct {
+ in, out string
+}{
+ {"", ""},
+ {"\n", "\n"},
+ {"a", ">>>a"},
+ {"a\n", ">>>a\n"},
+ {"a\nb", ">>>a\n>>>b"},
+ {" ", ">>> "},
+}
+
+func (s *PrinterS) TestIndent(c *C) {
+ for _, test := range indentTests {
+ out := Indent(test.in, ">>>")
+ c.Assert(out, Equals, test.out)
+ }
+
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/run.go b/Godeps/_workspace/src/gopkg.in/check.v1/run.go
new file mode 100644
index 00000000..da8fd798
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/run.go
@@ -0,0 +1,175 @@
+package check
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+)
+
+// -----------------------------------------------------------------------
+// Test suite registry.
+
+var allSuites []interface{}
+
+// Suite registers the given value as a test suite to be run. Any method
+// of the given value whose name starts with the Test prefix is considered
+// a test method.
+func Suite(suite interface{}) interface{} {
+ allSuites = append(allSuites, suite)
+ return suite
+}
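+
+// A minimal registration sketch, not part of the original source: MySuite,
+// TestAddition, and the Test wrapper below are hypothetical names. The
+// wrapper hands control to go test through TestingT (defined further down):
+//
+//	func Test(t *testing.T) { TestingT(t) }
+//
+//	type MySuite struct{}
+//
+//	var _ = Suite(&MySuite{})
+//
+//	func (s *MySuite) TestAddition(c *C) {
+//		c.Assert(1+1, Equals, 2)
+//	}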
+
+// -----------------------------------------------------------------------
+// Public running interface.
+
+var (
+ oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
+ oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
+ oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
+ oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
+ oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "Approximate run time for each benchmark")
+ oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
+ oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
+
+ newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
+ newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
+ newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
+ newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
+ newBenchTime = flag.Duration("check.btime", 1*time.Second, "Approximate run time for each benchmark")
+ newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
+ newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
+ newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
+)
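+
+// A hypothetical invocation selecting a single test verbosely, using the
+// flag names defined above:
+//
+//	go test -check.v -check.f "MySuite.TestAddition"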
+
+// TestingT runs all test suites registered with the Suite function,
+// printing results to stdout, and reporting any failures back to
+// the "testing" package.
+func TestingT(testingT *testing.T) {
+ benchTime := *newBenchTime
+ if benchTime == 1*time.Second {
+ benchTime = *oldBenchTime
+ }
+ conf := &RunConf{
+ Filter: *oldFilterFlag + *newFilterFlag,
+ Verbose: *oldVerboseFlag || *newVerboseFlag,
+ Stream: *oldStreamFlag || *newStreamFlag,
+ Benchmark: *oldBenchFlag || *newBenchFlag,
+ BenchmarkTime: benchTime,
+ BenchmarkMem: *newBenchMem,
+ KeepWorkDir: *oldWorkFlag || *newWorkFlag,
+ }
+ if *oldListFlag || *newListFlag {
+ w := bufio.NewWriter(os.Stdout)
+ for _, name := range ListAll(conf) {
+ fmt.Fprintln(w, name)
+ }
+ w.Flush()
+ return
+ }
+ result := RunAll(conf)
+ println(result.String())
+ if !result.Passed() {
+ testingT.Fail()
+ }
+}
+
+// RunAll runs all test suites registered with the Suite function, using the
+// provided run configuration.
+func RunAll(runConf *RunConf) *Result {
+ result := Result{}
+ for _, suite := range allSuites {
+ result.Add(Run(suite, runConf))
+ }
+ return &result
+}
+
+// Run runs the provided test suite using the provided run configuration.
+func Run(suite interface{}, runConf *RunConf) *Result {
+ runner := newSuiteRunner(suite, runConf)
+ return runner.run()
+}
+
+// ListAll returns the names of all the test functions registered with the
+// Suite function that will be run with the provided run configuration.
+func ListAll(runConf *RunConf) []string {
+ var names []string
+ for _, suite := range allSuites {
+ names = append(names, List(suite, runConf)...)
+ }
+ return names
+}
+
+// List returns the names of the test functions in the given
+// suite that will be run with the provided run configuration.
+func List(suite interface{}, runConf *RunConf) []string {
+ var names []string
+ runner := newSuiteRunner(suite, runConf)
+ for _, t := range runner.tests {
+ names = append(names, t.String())
+ }
+ return names
+}
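+
+// An illustrative sketch, reusing the hypothetical MySuite from the comment
+// near Suite above:
+//
+//	for _, name := range List(&MySuite{}, &RunConf{Filter: "Addition"}) {
+//		fmt.Println(name) // e.g. "MySuite.TestAddition"
+//	}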
+
+// -----------------------------------------------------------------------
+// Result methods.
+
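+// Add accumulates the counters of other into r and joins their work
+// directories, so that results from several suites can be reported as one.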
+func (r *Result) Add(other *Result) {
+ r.Succeeded += other.Succeeded
+ r.Skipped += other.Skipped
+ r.Failed += other.Failed
+ r.Panicked += other.Panicked
+ r.FixturePanicked += other.FixturePanicked
+ r.ExpectedFailures += other.ExpectedFailures
+ r.Missed += other.Missed
+ if r.WorkDir != "" && other.WorkDir != "" {
+ r.WorkDir += ":" + other.WorkDir
+ } else if other.WorkDir != "" {
+ r.WorkDir = other.WorkDir
+ }
+}
+
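+// Passed reports whether the run had no failures, panics, fixture panics,
+// missed tests, or run errors.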
+func (r *Result) Passed() bool {
+ return (r.Failed == 0 && r.Panicked == 0 &&
+ r.FixturePanicked == 0 && r.Missed == 0 &&
+ r.RunError == nil)
+}
+
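+// String renders the counters in the one-line format printed by TestingT.
+// For instance, three successes, one skip and one failure would yield
+// (illustrative values):
+//
+//	OOPS: 3 passed, 1 skipped, 1 FAILED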
+func (r *Result) String() string {
+ if r.RunError != nil {
+ return "ERROR: " + r.RunError.Error()
+ }
+
+ var value string
+ if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
+ r.Missed == 0 {
+ value = "OK: "
+ } else {
+ value = "OOPS: "
+ }
+ value += fmt.Sprintf("%d passed", r.Succeeded)
+ if r.Skipped != 0 {
+ value += fmt.Sprintf(", %d skipped", r.Skipped)
+ }
+ if r.ExpectedFailures != 0 {
+ value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
+ }
+ if r.Failed != 0 {
+ value += fmt.Sprintf(", %d FAILED", r.Failed)
+ }
+ if r.Panicked != 0 {
+ value += fmt.Sprintf(", %d PANICKED", r.Panicked)
+ }
+ if r.FixturePanicked != 0 {
+ value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
+ }
+ if r.Missed != 0 {
+ value += fmt.Sprintf(", %d MISSED", r.Missed)
+ }
+ if r.WorkDir != "" {
+ value += "\nWORK=" + r.WorkDir
+ }
+ return value
+}
diff --git a/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go b/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go
new file mode 100644
index 00000000..f41fffc3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go
@@ -0,0 +1,419 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+ "errors"
+ . "gopkg.in/check.v1"
+ "os"
+ "sync"
+)
+
+var runnerS = Suite(&RunS{})
+
+type RunS struct{}
+
+func (s *RunS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Tests ensuring result counting works properly.
+
+func (s *RunS) TestSuccess(c *C) {
+ output := String{}
+ result := Run(&SuccessHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 1)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFailure(c *C) {
+ output := String{}
+ result := Run(&FailHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 1)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFixture(c *C) {
+ output := String{}
+ result := Run(&FixtureHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 2)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnTest(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "Test1"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 1)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 1)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpTest(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "SetUpTest"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 1)
+ c.Check(result.Missed, Equals, 2)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpSuite(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "SetUpSuite"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 1)
+ c.Check(result.Missed, Equals, 2)
+ c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check result aggregation.
+
+func (s *RunS) TestAdd(c *C) {
+ result := &Result{
+ Succeeded: 1,
+ Skipped: 2,
+ Failed: 3,
+ Panicked: 4,
+ FixturePanicked: 5,
+ Missed: 6,
+ ExpectedFailures: 7,
+ }
+ result.Add(&Result{
+ Succeeded: 10,
+ Skipped: 20,
+ Failed: 30,
+ Panicked: 40,
+ FixturePanicked: 50,
+ Missed: 60,
+ ExpectedFailures: 70,
+ })
+ c.Check(result.Succeeded, Equals, 11)
+ c.Check(result.Skipped, Equals, 22)
+ c.Check(result.Failed, Equals, 33)
+ c.Check(result.Panicked, Equals, 44)
+ c.Check(result.FixturePanicked, Equals, 55)
+ c.Check(result.Missed, Equals, 66)
+ c.Check(result.ExpectedFailures, Equals, 77)
+ c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check the Passed() method.
+
+func (s *RunS) TestPassed(c *C) {
+ c.Assert((&Result{}).Passed(), Equals, true)
+ c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true)
+ c.Assert((&Result{Skipped: 1}).Passed(), Equals, true)
+ c.Assert((&Result{Failed: 1}).Passed(), Equals, false)
+ c.Assert((&Result{Panicked: 1}).Passed(), Equals, false)
+ c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false)
+ c.Assert((&Result{Missed: 1}).Passed(), Equals, false)
+ c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Check that result printing is working correctly.
+
+func (s *RunS) TestPrintSuccess(c *C) {
+ result := &Result{Succeeded: 5}
+ c.Check(result.String(), Equals, "OK: 5 passed")
+}
+
+func (s *RunS) TestPrintFailure(c *C) {
+ result := &Result{Failed: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED")
+}
+
+func (s *RunS) TestPrintSkipped(c *C) {
+ result := &Result{Skipped: 5}
+ c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped")
+}
+
+func (s *RunS) TestPrintExpectedFailures(c *C) {
+ result := &Result{ExpectedFailures: 5}
+ c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures")
+}
+
+func (s *RunS) TestPrintPanicked(c *C) {
+ result := &Result{Panicked: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED")
+}
+
+func (s *RunS) TestPrintFixturePanicked(c *C) {
+ result := &Result{FixturePanicked: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED")
+}
+
+func (s *RunS) TestPrintMissed(c *C) {
+ result := &Result{Missed: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED")
+}
+
+func (s *RunS) TestPrintAll(c *C) {
+ result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3,
+ Panicked: 4, FixturePanicked: 5, Missed: 6}
+ c.Check(result.String(), Equals,
+ "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+
+ "5 FIXTURE-PANICKED, 6 MISSED")
+}
+
+func (s *RunS) TestPrintRunError(c *C) {
+ result := &Result{Succeeded: 1, Failed: 1,
+ RunError: errors.New("Kaboom!")}
+ c.Check(result.String(), Equals, "ERROR: Kaboom!")
+}
+
+// -----------------------------------------------------------------------
+// Verify that the method pattern flag works correctly.
+
+func (s *RunS) TestFilterTestName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "Test[91]"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterTestNameWithAll(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: ".*"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "FixtureHelper"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteNameAndTestName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test2")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterAllOut(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "NotFound"}
+ Run(&helper, &runConf)
+ c.Check(len(helper.calls), Equals, 0)
+}
+
+func (s *RunS) TestRequirePartialMatch(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "est"}
+ Run(&helper, &runConf)
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterError(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "]["}
+ result := Run(&helper, &runConf)
+ c.Check(result.String(), Equals,
+ "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`")
+ c.Check(len(helper.calls), Equals, 0)
+}
+
+// -----------------------------------------------------------------------
+// Verify that List works correctly.
+
+func (s *RunS) TestListFiltered(c *C) {
+ names := List(&FixtureHelper{}, &RunConf{Filter: "1"})
+ c.Assert(names, DeepEquals, []string{
+ "FixtureHelper.Test1",
+ })
+}
+
+func (s *RunS) TestList(c *C) {
+ names := List(&FixtureHelper{}, &RunConf{})
+ c.Assert(names, DeepEquals, []string{
+ "FixtureHelper.Test1",
+ "FixtureHelper.Test2",
+ })
+}
+
+// -----------------------------------------------------------------------
+// Verify that verbose mode prints tests which pass as well.
+
+func (s *RunS) TestVerboseMode(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) {
+ helper := FixtureHelper{panicOn: "Test1"}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify the stream output mode. In this mode there's no output caching.
+
+type StreamHelper struct {
+ l2 sync.Mutex
+ l3 sync.Mutex
+}
+
+func (s *StreamHelper) SetUpSuite(c *C) {
+ c.Log("0")
+}
+
+func (s *StreamHelper) Test1(c *C) {
+ c.Log("1")
+ s.l2.Lock()
+ s.l3.Lock()
+ go func() {
+ s.l2.Lock() // Wait for "2".
+ c.Log("3")
+ s.l3.Unlock()
+ }()
+}
+
+func (s *StreamHelper) Test2(c *C) {
+ c.Log("2")
+ s.l2.Unlock()
+ s.l3.Lock() // Wait for "3".
+ c.Fail()
+ c.Log("4")
+}
+
+func (s *RunS) TestStreamMode(c *C) {
+ helper := &StreamHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(helper, &runConf)
+
+ expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
+ "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
+ "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
+ "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+type StreamMissHelper struct{}
+
+func (s *StreamMissHelper) SetUpSuite(c *C) {
+ c.Log("0")
+ c.Fail()
+}
+
+func (s *StreamMissHelper) Test1(c *C) {
+ c.Log("1")
+}
+
+func (s *RunS) TestStreamModeWithMiss(c *C) {
+ helper := &StreamMissHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(helper, &runConf)
+
+ expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" +
+ "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" +
+ "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify that the keep-work-dir request indeed does so.
+
+type WorkDirSuite struct{}
+
+func (s *WorkDirSuite) Test(c *C) {
+ c.MkDir()
+}
+
+func (s *RunS) TestKeepWorkDir(c *C) {
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true}
+ result := Run(&WorkDirSuite{}, &runConf)
+
+ c.Assert(result.String(), Matches, ".*\nWORK="+result.WorkDir)
+
+ stat, err := os.Stat(result.WorkDir)
+ c.Assert(err, IsNil)
+ c.Assert(stat.IsDir(), Equals, true)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 00000000..a68e67f0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 00000000..8da58fbf
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
new file mode 100644
index 00000000..d6c919e6
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,128 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including anchors,
+tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct{C int; D []int ",flow"}
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
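+
+As a further sketch beyond the original example, struct fields can be renamed
+and tuned through `yaml` field tags; the type and field names below are made
+up for illustration:
+
+```Go
+type Config struct {
+	Name  string `yaml:"name"`
+	Ports []int  `yaml:"ports,flow"`
+	Token string `yaml:"-"` // omitted from both marshalling and unmarshalling
+}
+```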
+
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 00000000..95ec014e
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
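+// yaml_insert_token inserts token at offset pos within the parser's token
+// queue (relative to tokens_head), or appends it when pos is negative.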
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 00000000..c50c6290
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,665 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+ panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
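+//
+// A minimal decode sketch (hypothetical snippet; doc is a *node produced by
+// the parser above):
+//
+// d := newDecoder()
+// var out map[string]interface{}
+// d.unmarshal(doc, reflect.ValueOf(&out).Elem())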
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+ d := &decoder{mapType: defaultMapType}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
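+// terror records a type error for node n against the target type, quoting a
+// truncated form of the offending scalar value.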
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// the value unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var zeroValue reflect.Value
+
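+// resetMap deletes every key from out in place, for maps that cannot be
+// replaced wholesale because they are not addressable.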
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
+
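+// settableValueOf returns an addressable copy of i so the decoder can set it.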
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ // okay
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, 0))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ l := len(n.children)
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Set(reflect.Append(out, e))
+ }
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
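+// merge expands a "<<" merge key into out. The value must be a mapping, an
+// alias to a mapping, or a sequence of those; sequences are walked in reverse
+// so that earlier maps take precedence, per the YAML merge spec.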
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
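+// isMerge reports whether n is a YAML merge key ("<<"), either resolved
+// implicitly or tagged explicitly with !!merge.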
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go
new file mode 100644
index 00000000..90ffcc01
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go
@@ -0,0 +1,902 @@
+package yaml_test
+
+import (
+ "errors"
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "math"
+ "net"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ &struct{}{},
+ }, {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // int
+ {
+ "int_max: 2147483647",
+ map[string]int{"int_max": math.MaxInt32},
+ },
+ {
+ "int_min: -2147483648",
+ map[string]int{"int_min": math.MinInt32},
+ },
+ {
+ "int_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int{},
+ },
+
+ // int64
+ {
+ "int64_max: 9223372036854775807",
+ map[string]int64{"int64_max": math.MaxInt64},
+ },
+ {
+ "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_max_base2": math.MaxInt64},
+ },
+ {
+ "int64_min: -9223372036854775808",
+ map[string]int64{"int64_min": math.MinInt64},
+ },
+ {
+ "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_neg_base2": -math.MaxInt64},
+ },
+ {
+ "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int64{},
+ },
+
+ // uint
+ {
+ "uint_min: 0",
+ map[string]uint{"uint_min": 0},
+ },
+ {
+ "uint_max: 4294967295",
+ map[string]uint{"uint_max": math.MaxUint32},
+ },
+ {
+ "uint_underflow: -1",
+ map[string]uint{},
+ },
+
+ // uint64
+ {
+ "uint64_min: 0",
+ map[string]uint{"uint64_min": 0},
+ },
+ {
+ "uint64_max: 18446744073709551615",
+ map[string]uint64{"uint64_max": math.MaxUint64},
+ },
+ {
+ "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
+ map[string]uint64{"uint64_max_base2": math.MaxUint64},
+ },
+ {
+ "uint64_maxint64: 9223372036854775807",
+ map[string]uint64{"uint64_maxint64": math.MaxInt64},
+ },
+ {
+ "uint64_underflow: -1",
+ map[string]uint64{},
+ },
+
+ // float32
+ {
+ "float32_max: 3.40282346638528859811704183484516925440e+38",
+ map[string]float32{"float32_max": math.MaxFloat32},
+ },
+ {
+ "float32_nonzero: 1.401298464324817070923729583289916131280e-45",
+ map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
+ },
+ {
+ "float32_maxuint64: 18446744073709551615",
+ map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
+ },
+ {
+ "float32_maxuint64+1: 18446744073709551616",
+ map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
+ },
+
+ // float64
+ {
+ "float64_max: 1.797693134862315708145274237317043567981e+308",
+ map[string]float64{"float64_max": math.MaxFloat64},
+ },
+ {
+ "float64_nonzero: 4.940656458412465441765687928682213723651e-324",
+ map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
+ },
+ {
+ "float64_maxuint64: 18446744073709551615",
+ map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
+ },
+ {
+ "float64_maxuint64+1: 18446744073709551616",
+ map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]string{"foo": ""},
+ }, {
+ "foo: null",
+ map[string]interface{}{"foo": nil},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+ // Struct inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // bug 1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+
+ // Issue #24.
+ {
+ "a: ",
+ map[string]string{"a": ""},
+ },
+
+ // Base 60 floats are obsolete and unsupported.
+ {
+ "a: 1:1\n",
+ map[string]string{"a": "1:1"},
+ },
+
+ // Binary data.
+ {
+ "a: !!binary gIGC\n",
+ map[string]string{"a": "\x80\x81\x82"},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
+ map[string]string{"a": strings.Repeat("\x00", 52)},
+ },
+
+ // Ordered maps.
+ {
+ "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ },
+
+ // Issue #39.
+ {
+ "a:\n b:\n c: d\n",
+ map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
+ },
+
+ // Custom map type.
+ {
+ "a: {b: c}",
+ M{"a": M{"b": "c"}},
+ },
+
+ // Support encoding.TextUnmarshaler.
+ {
+ "a: 1.2.3.4\n",
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ },
+}
+
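+// M is the custom map type exercised by the decode tests above.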
+type M map[interface{}]interface{}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
+
+func (s *S) TestUnmarshal(c *C) {
+ for _, item := range unmarshalTests {
+ t := reflect.ValueOf(item.value).Type()
+ var value interface{}
+ switch t.Kind() {
+ case reflect.Map:
+ value = reflect.MakeMap(t).Interface()
+ case reflect.String:
+ value = reflect.New(t).Interface()
+ case reflect.Ptr:
+ value = reflect.New(t.Elem()).Interface()
+ default:
+ c.Fatalf("missing case for %s", t)
+ }
+ err := yaml.Unmarshal([]byte(item.data), value)
+ if _, ok := err.(*yaml.TypeError); !ok {
+ c.Assert(err, IsNil)
+ }
+ if t.Kind() == reflect.String {
+ c.Assert(*value.(*string), Equals, item.value)
+ } else {
+ c.Assert(value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
+ {"v: [A,", "yaml: line 1: did not find expected node content"},
+ {"v:\n- [A,", "yaml: line 2: did not find expected node content"},
+ {"a: *b\n", "yaml: unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
+ {"value: -", "yaml: block sequence entries are not allowed in this context"},
+ {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
+ {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
+ {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var unmarshalerTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+}
+
+var unmarshalerResult = map[int]error{}
+
+type unmarshalerType struct {
+ value interface{}
+}
+
+func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
+ if err := unmarshal(&o.value); err != nil {
+ return err
+ }
+ if i, ok := o.value.(int); ok {
+ if result, ok := unmarshalerResult[i]; ok {
+ return result
+ }
+ }
+ return nil
+}
+
+type unmarshalerPointer struct {
+ Field *unmarshalerType "_"
+}
+
+type unmarshalerValue struct {
+ Field unmarshalerType "_"
+}
+
+func (s *S) TestUnmarshalerPointerField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerPointer{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ if item.value == nil {
+ c.Assert(obj.Field, IsNil)
+ } else {
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalerValueField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerValue{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalerWholeDocument(c *C) {
+ obj := &unmarshalerType{}
+ err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
+ c.Assert(err, IsNil)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
+ c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
+}
+
+func (s *S) TestUnmarshalerTypeError(c *C) {
+ unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
+ unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
+ defer func() {
+ delete(unmarshalerResult, 2)
+ delete(unmarshalerResult, 4)
+ }()
+
+ type T struct {
+ Before int
+ After int
+ M map[string]*unmarshalerType
+ }
+ var v T
+ data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " foo\n"+
+ " bar\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+ c.Assert(v.M["abc"], NotNil)
+ c.Assert(v.M["def"], IsNil)
+ c.Assert(v.M["ghi"], NotNil)
+ c.Assert(v.M["jkl"], IsNil)
+
+ c.Assert(v.M["abc"].value, Equals, 1)
+ c.Assert(v.M["ghi"].value, Equals, 3)
+}
+
+type proxyTypeError struct{}
+
+func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ var a int32
+ var b int64
+ if err := unmarshal(&s); err != nil {
+ panic(err)
+ }
+ if s == "a" {
+ if err := unmarshal(&b); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&a)
+ }
+ if err := unmarshal(&a); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&b)
+}
+
+func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
+ type T struct {
+ Before int
+ After int
+ M map[string]*proxyTypeError
+ }
+ var v T
+ data := `{before: A, m: {abc: a, def: b}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " line 1: cannot unmarshal !!str `a` into int32\n"+
+ " line 1: cannot unmarshal !!str `b` into int64\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+}
+
+type failingUnmarshaler struct{}
+
+var failingErr = errors.New("failingErr")
+
+func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ return failingErr
+}
+
+func (s *S) TestUnmarshalerError(c *C) {
+ err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ list:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ ! "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[interface{}]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+var unmarshalNullTests = []func() interface{}{
+ func() interface{} { var v interface{}; v = "v"; return &v },
+ func() interface{} { var s = "s"; return &s },
+ func() interface{} { var s = "s"; sptr := &s; return &sptr },
+ func() interface{} { var i = 1; return &i },
+ func() interface{} { var i = 1; iptr := &i; return &iptr },
+ func() interface{} { m := map[string]int{"s": 1}; return &m },
+ func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+ for _, test := range unmarshalNullTests {
+ item := test()
+ zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+ err := yaml.Unmarshal([]byte("null"), item)
+ c.Assert(err, IsNil)
+ if reflect.TypeOf(item).Kind() == reflect.Map {
+ c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+ } else {
+ c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+ }
+ }
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000..9b3dc4a4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
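+// The five-byte slack leaves room for the widest UTF-8 sequence (4 bytes)
+// or a CRLF break before the next flush check.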
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
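+// The lookahead lets checks such as yaml_emitter_check_empty_sequence and
+// yaml_emitter_check_simple_key inspect upcoming events before any output
+// for the current event is committed.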
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+ return false
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
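+// Only single-line nodes whose anchor, tag, and value total at most 128
+// characters qualify.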
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
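+
+// How the cascade above plays out through the public API; the '-' output is
+// asserted in encode_test.go, and the first case follows the same rules:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        for _, v := range []string{"plain", "-"} {
+//            out, _ := yaml.Marshal(map[string]string{"v": v})
+//            fmt.Print(string(out))
+//        }
+//        // v: plain   (plain style allowed)
+//        // v: '-'     (a lone '-' is an indicator, so single quotes are used)
+//    }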
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
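+
+// The handle+suffix path above is what produces the familiar shorthand tags;
+// a sketch via the public API (output as asserted in encode_test.go):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        // Invalid UTF-8 resolves to the !!binary tag, written as handle "!!"
+//        // plus suffix "binary"; the base64 encoding of the bytes follows.
+//        out, _ := yaml.Marshal(map[string]string{"a": "\x80\x81\x82"})
+//        fmt.Print(string(out)) // a: !!binary gIGC
+//    }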
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
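+
+// Anchor and alias names are exactly the alphanumeric strings accepted
+// above; a decoding sketch (the encoder in this package does not emit
+// anchors itself, so the example uses Unmarshal):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        var v struct{ A, B string }
+//        _ = yaml.Unmarshal([]byte("a: &x hello\nb: *x\n"), &v)
+//        fmt.Println(v.A, v.B) // hello hello
+//    }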
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
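+
+// Leading or trailing spaces clear both plain-style flags above, so such
+// values come out quoted; a sketch via the public API:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        for _, v := range []string{" x", "x "} {
+//            out, _ := yaml.Marshal(map[string]string{"a": v})
+//            fmt.Print(string(out))
+//        }
+//        // a: ' x'
+//        // a: 'x '
+//    }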
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
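+
+// A standalone sketch of the three octets written above (the UTF-8 byte
+// order mark):
+//
+//    package main
+//
+//    import "fmt"
+//
+//    func main() {
+//        bom := []byte{0xEF, 0xBB, 0xBF}
+//        fmt.Printf("% X\n", bom) // EF BB BF
+//    }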
+
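+// Write a line break if needed, then pad with spaces up to the current indent.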
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
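+// Write an indicator such as '-', '?', ':' or a quote, with a leading space when required.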
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
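+// Write an anchor or alias name (its '&' or '*' indicator is written separately).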
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
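+// Write a tag handle such as '!' or '!!'.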
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
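+// Write tag content, percent-encoding any byte that is not a valid URI character.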
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
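+
+// A standalone sketch of the nibble-to-hex conversion used in the loop
+// above, applied to the two UTF-8 octets of 'é':
+//
+//    package main
+//
+//    import "fmt"
+//
+//    func hexDigit(c byte) byte {
+//        if c < 10 {
+//            return '0' + c
+//        }
+//        return 'A' - 10 + c
+//    }
+//
+//    func main() {
+//        for _, octet := range []byte("é") { // 0xC3 0xA9
+//            fmt.Printf("%%%c%c", hexDigit(octet>>4), hexDigit(octet&0x0F))
+//        }
+//        fmt.Println() // %C3%A9
+//    }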
+
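+// Write a plain (unquoted) scalar, folding long lines at spaces when allowed.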
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
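+// Write a single-quoted scalar, doubling embedded single quotes.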
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
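+// Write a double-quoted scalar, escaping non-printable and special characters.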
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
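+
+// The escape table above in action via the public API (outputs as asserted
+// in encode_test.go):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        out, _ := yaml.Marshal(map[string]string{"a": "\x00"})
+//        fmt.Print(string(out)) // a: "\0"
+//
+//        // With the emitter in unicode mode (the default set by this
+//        // package), printable non-ASCII text is written verbatim:
+//        out, _ = yaml.Marshal(map[string]string{"b": "你好"})
+//        fmt.Print(string(out)) // b: 你好
+//    }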
+
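+// Write the block scalar header hints: an explicit indent digit if needed,
+// and a chomping indicator ('-' strip, '+' keep; none for clip).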
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
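+
+// The three chomping modes, sketched through the public Marshal API (the
+// encoder picks the literal style for strings containing line breaks, see
+// encode.go); output comments are approximate:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        for _, v := range []string{"a\nb", "a\nb\n", "a\nb\n\n"} {
+//            out, _ := yaml.Marshal(map[string]string{"v": v})
+//            fmt.Print(string(out))
+//        }
+//        // v: |-   strip ('-'): no trailing line break in the value
+//        // v: |    clip (no hint): exactly one trailing line break
+//        // v: |+   keep ('+'): trailing break followed by another break
+//    }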
+
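+// Write a literal ('|') block scalar.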
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
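+// Write a folded ('>') block scalar, folding long lines at spaces.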
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 00000000..972bc038
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,290 @@
+package yaml
+
+import (
+ "encoding"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ }
+ if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
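+
+// The kind switch above in action (struct output as asserted in
+// encode_test.go; the slice case follows the same dispatch):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        out, _ := yaml.Marshal(&struct{ Hello string }{"world"})
+//        fmt.Print(string(out)) // hello: world   (struct -> mapping)
+//
+//        out, _ = yaml.Marshal([]int{1, 2})
+//        fmt.Print(string(out)) // - 1
+//                               // - 2            (slice -> sequence)
+//    }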
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
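+
+// A standalone check against the same regular expression (190:20:30 is
+// sexagesimal notation for 685230, the classic YAML 1.1 example):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "regexp"
+//    )
+//
+//    func main() {
+//        base60 := regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+//        fmt.Println(base60.MatchString("190:20:30")) // true
+//        fmt.Println(base60.MatchString("1.30"))      // false (no ':' group)
+//    }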
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
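+
+// Strings that would resolve to another type round-trip quoted, as the tag
+// check above requires (outputs as asserted in encode_test.go):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "gopkg.in/yaml.v2"
+//    )
+//
+//    func main() {
+//        for _, v := range []string{"true", "10", "1:1"} {
+//            out, _ := yaml.Marshal(map[string]string{"v": v})
+//            fmt.Print(string(out))
+//        }
+//        // v: "true"
+//        // v: "10"
+//        // v: "1:1"
+//    }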
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go
new file mode 100644
index 00000000..cdbf64a9
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go
@@ -0,0 +1,434 @@
+package yaml_test
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+ "net"
+)
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ nil,
+ "null\n",
+ }, {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- |-\n B\n C\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X int } "a,omitempty"
+ B int "b,omitempty"
+ }{nil, 0},
+ "{}\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+
+ // Issue #24: bug in map merging logic.
+ {
+ map[string]string{"a": ""},
+ "a: \n",
+ },
+
+ // Issue #34: marshal unsupported base 60 floats quoted for compatibility
+ // with old YAML 1.1 parsers.
+ {
+ map[string]string{"a": "1:1"},
+ "a: \"1:1\"\n",
+ },
+
+ // Binary data.
+ {
+ map[string]string{"a": "\x00"},
+ "a: \"\\0\"\n",
+ }, {
+ map[string]string{"a": "\x80\x81\x82"},
+ "a: !!binary gIGC\n",
+ }, {
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ },
+
+ // Ordered maps.
+ {
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
+ },
+
+ // Encode unicode as utf-8 rather than in escaped form.
+ {
+ map[string]string{"a": "你好"},
+ "a: 你好\n",
+ },
+
+ // Support encoding.TextMarshaler.
+ {
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ "a: 1.2.3.4\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ for _, item := range marshalTests {
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+ panic string
+}{{
+ value: &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
+}}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ if item.panic != "" {
+ c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
+ } else {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+ }
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+var marshalerTests = []struct {
+ data string
+ value interface{}
+}{
+ {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", []interface{}{1, "A"}},
+ {"_: 10\n", 10},
+ {"_: null\n", nil},
+ {"_: BAR!\n", "BAR!"},
+}
+
+type marshalerType struct {
+ value interface{}
+}
+
+func (o marshalerType) MarshalYAML() (interface{}, error) {
+ return o.value, nil
+}
+
+type marshalerValue struct {
+ Field marshalerType "_"
+}
+
+func (s *S) TestMarshaler(c *C) {
+ for _, item := range marshalerTests {
+ obj := &marshalerValue{}
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestMarshalerWholeDocument(c *C) {
+ obj := &marshalerType{}
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+type failingMarshaler struct{}
+
+func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
+ return nil, failingErr
+}
+
+func (s *S) TestMarshalerError(c *C) {
+ _, err := yaml.Marshal(&failingMarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/2",
+ "a/10",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 00000000..0a7037ad
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
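+// For example, the stream "a: [1, 2]\n" runs through these productions and
+// yields, in order: STREAM-START, DOCUMENT-START (implicit), MAPPING-START,
+// SCALAR "a", SEQUENCE-START (flow), SCALAR "1", SCALAR "2", SEQUENCE-END,
+// MAPPING-END, DOCUMENT-END (implicit), STREAM-END. A minimal driver sketch
+// (these identifiers are unexported, so it has to live inside package yaml):
+//
+//    parser := yaml_parser_t{}
+//    yaml_parser_initialize(&parser)
+//    yaml_parser_set_input_string(&parser, []byte("a: [1, 2]\n"))
+//    var event yaml_event_t
+//    for {
+//        if !yaml_parser_parse(&parser, &event) {
+//            break // parse error; details are in parser.problem
+//        }
+//        fmt.Printf("event type %d\n", event.typ)
+//        if event.typ == yaml_STREAM_END_EVENT {
+//            break
+//        }
+//    }
+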
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+ return false
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
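+
+// For example, the stream "a: 1\n---\nb: 2\n" yields an implicit
+// DOCUMENT-START event for the first document (no directives, no '---') and
+// an explicit one for the second, whose marks span the '---' token.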
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
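+
+// For example, "a: 1\n...\n" ends with an explicit DOCUMENT-END token, so
+// the event carries implicit=false and its end mark covers the '...' line;
+// without the '...' the event is implicit, with coincident start and end
+// marks taken from the following token.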
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
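+
+// For example, the node "&x !!str hello" carries both properties: the ANCHOR
+// token sets anchor "x", the TAG token resolves the "!!" handle against the
+// default directives to "tag:yaml.org,2002:str", and the SCALAR token then
+// completes a SCALAR event with neither implicit flag set.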
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
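+// For illustration (not part of the original grammar notes): in the block
+// mapping
+//
+//     a:
+//     b: 1
+//
+// the value for "a" is absent, so this state emits an empty scalar event for
+// it before returning to the key state.
+//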
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
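+//
+// For illustration: a mapping entry such as "key:" with no value resolves to
+// an empty plain scalar event, anchored at the position where the value
+// would have appeared.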
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
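+// The implicit handles defined by the YAML specification. For illustration,
+// a node tagged "!!str" resolves via the "!!" handle below to the full tag
+// "tag:yaml.org,2002:str".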
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
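+//
+// As an illustrative example (the handle and prefix below are invented), a
+// document prologue such as
+//
+//     %YAML 1.1
+//     %TAG !e! tag:example.com,2015:
+//     ---
+//
+// yields a version directive of {1, 1} and registers the "!e!" handle, plus
+// the defaults above, in parser.tag_directives.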
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000..d5fb0972
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on
+// failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
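+ //
+ // Worked example (illustrative): the octets 0xC3 0xA9 decode
+ // as width 2 (0xC3&0xE0 == 0xC0), with value
+ // (0xC3&0x1F)<<6 + (0xA9&0x3F) = 0xE9, i.e. U+00E9 ('é').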
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
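+ //
+ // Worked example (illustrative): U+1F600 gives
+ // U' = 0xF600, so W1 = 0xD800 + (0xF600>>10) = 0xD83D
+ // and W2 = 0xDC00 + (0xF600&0x3FF) = 0xDE00.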
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ }
+ buffer_len += width
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000..93a86327
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
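+// resolve guesses the tag for a raw scalar value. A few informal examples of
+// the behavior below: resolve("", "yes") yields (yaml_BOOL_TAG, true) via the
+// resolve map, resolve("", "0x1F") yields (yaml_INT_TAG, 31) via ParseInt
+// with base 0, and resolve("", "3.14") yields (yaml_FLOAT_TAG, 3.14).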
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
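+// For example (illustrative), encodeBase64("hello") returns "aGVsbG8=" on a
+// single line, since the encoded length (8) fits within a 70-byte line, while
+// longer inputs get a '\n' after each 70-byte chunk.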
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..fe93b190
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or, an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends
+// a block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntactic peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos++
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by now, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
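+//
+// For illustration: in the flow mapping {a: 1}, a potential simple key is
+// saved when 'a' is scanned; once ':' is seen, the value fetcher uses the
+// saved token_number to insert a KEY token ahead of the already-queued
+// scalar token for "a".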
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
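+//
+// For illustration, scanning the block document
+//
+//     a:
+//       b:
+//         - x
+//
+// rolls the indent through -1 -> 0 -> 2 -> 4 (BLOCK-MAPPING-START twice, then
+// BLOCK-SEQUENCE-START), and unrolling back to -1 at the end of the stream
+// appends three BLOCK-END tokens.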
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
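+
+// For instance, scanning "- a" and "- b" at column 0 in the block context
+// emits BLOCK-SEQUENCE-START once (via roll_indent) and then one
+// BLOCK-ENTRY token per '-' indicator; the closing BLOCK-END is produced
+// later by yaml_parser_unroll_indent.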
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
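+
+// To make the simple-key machinery above concrete: for the input "a: 1"
+// the scalar "a" is first recorded only as a *potential* simple key; when
+// the ':' arrives, this function retroactively inserts KEY (and, if
+// needed, BLOCK-MAPPING-START) at the saved token position and then
+// appends the VALUE token.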
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
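+
+// Example: for the input "  # a comment" followed by a line break and
+// "foo", this loop eats the blanks, the comment and the break, leaving the
+// scanner positioned at 'f' with simple_key_allowed set to true (a fresh
+// line in the block context).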
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found uknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
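+
+// For example, scanning "%YAML 1.1" yields major == 1 and minor == 1; a
+// version component longer than max_number_length (two digits) is
+// rejected with the "found extremely long version number" error above.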
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if the length of the anchor is greater than 0 and it is followed
+ // by a whitespace character or one of the indicators:
+ //
+ // '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
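+
+// Example: "%21" decodes to '!' in a single iteration, while a two-byte
+// UTF-8 character such as "%C3%A9" (U+00E9) must supply both escaped
+// octets; w starts at the 1024 sentinel and is replaced by the real width
+// of the leading octet on the first pass.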
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an intendation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an intendation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
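+
+// A sketch of the chomping behavior implemented above: with the default
+// (clip) setting "|" followed by the lines "a" and "b" scans to "a\nb\n",
+// "|-" strips the final break ("a\nb"), and "|+" keeps any trailing blank
+// lines as well.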
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an intendation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violate intendation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000..5958822f
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
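+
+// The net effect of Less is a natural sort for map keys: digit runs inside
+// strings compare by numeric value, so "doc2" orders before "doc10", and
+// purely numeric or boolean keys (see keyFloat below) order before string
+// keys.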
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go
new file mode 100644
index 00000000..c5cf1ed4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000..190362f2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
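+
+// A small example of the recoding path: with yaml_UTF16LE_ENCODING the
+// buffered UTF-8 bytes 0xC3 0xA9 (U+00E9) are decoded to the rune 0xE9
+// and written to raw_buffer as the little-endian pair 0xE9 0x00.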
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..5d1b86c2
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,334 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
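+
+// For example, unmarshalling "b: 2\na: 1" into a MapSlice preserves the
+// source order of the items ("b" before "a"), which a plain
+// map[interface{}]interface{} would lose.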
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if d.terrors != nil {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the struct it's applied to, so its fields
+// are processed as if they were part of the outer
+// struct.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
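+
+// A short sketch of the tag options described above (the types are
+// illustrative):
+//
+//     type Point struct {
+//         X, Y int
+//     }
+//     type Shape struct {
+//         Origin Point  `yaml:"origin,flow"`     // emitted as {x: 0, y: 0}
+//         Label  string `yaml:"label,omitempty"` // dropped when empty
+//         Secret string `yaml:"-"`               // never marshalled
+//     }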
+
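+// handleErr recovers a yamlError panic raised via fail or failf below and
+// stores the wrapped error in *err; any unrelated panic is re-raised.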
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: " + format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ // TODO: Implement support for inline maps.
+ //case reflect.Map:
+ // if inlineMap >= 0 {
+ // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ // }
+ // if field.Type.Key() != reflect.TypeOf("") {
+ // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ // }
+ // inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
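+
+// As an illustration, given a struct such as
+//
+//     type S struct {
+//         Name string `yaml:"n,omitempty"`
+//     }
+//
+// getStructInfo records a single fieldInfo with Key "n" and OmitEmpty set,
+// and caches the result per reflect.Type so the tags are parsed only once.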
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ }
+ return false
+}
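+
+// Note that struct kinds fall through to the final return above, so a
+// zero-valued struct is never treated as empty; for example, an
+// (illustrative) field
+//
+//     T time.Time `yaml:"t,omitempty"`
+//
+// is emitted even when T holds the zero time.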
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..4b020b1b
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the source. The handler should write no more than len(buffer) bytes to
+// buffer and return the number of bytes written.
+//
+// A nil error means more input may follow; io.EOF signals the end of the
+// input, and any other error aborts parsing.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
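+
+// A sketch of a conforming handler backed by the in-memory input fields
+// defined on yaml_parser_t below:
+//
+//     func string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//         if parser.input_pos == len(parser.input) {
+//             return 0, io.EOF
+//         }
+//         n = copy(buffer, parser.input[parser.input_pos:])
+//         parser.input_pos += n
+//         return n, nil
+//     }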
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. The handler should write the whole
+// of buffer and return nil on success, or a non-nil error on failure.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
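+
+// A sketch of a conforming handler that appends to an in-memory buffer,
+// mirroring the output_buffer field defined on yaml_emitter_t below:
+//
+//     func buffer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//         *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+//         return nil
+//     }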
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
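+
+// The factor of 3 above covers the worst case for the input buffers: a
+// 2-byte UTF-16 unit in the raw buffer may decode to as many as 3 UTF-8
+// bytes in the working buffer, so tripling the raw size is a safe bound.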
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
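+
+// For example, width('a') is 1, width(0xC3) is 2 (the lead byte of a
+// two-byte sequence such as 'é'), and width(0x80) is 0 because a
+// continuation byte never starts a character.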
diff --git a/circle.yml b/circle.yml
index 1becb4fa..fd224558 100644
--- a/circle.yml
+++ b/circle.yml
@@ -68,7 +68,7 @@ test:
# - gvm use bleed && go version
# FMT
- - gvm use stable && test -z "$(gofmt -s -l . | tee /dev/stderr)":
+ - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)":
pwd: $BASE_STABLE
# VET
@@ -76,7 +76,7 @@ test:
pwd: $BASE_STABLE
# LINT
- - gvm use stable && test -z "$(golint ./... | tee /dev/stderr)":
+ - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)":
pwd: $BASE_STABLE
override: