Vendor update for AWS SDK
Updated to the latest version of the Go AWS SDK. Use vendored sub-packages within the AWS SDK. Adds missing vendor packages for letsencrypt. Fixes #1832. Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
parent 2052f29be6
commit acae5dcfff

64 changed files with 5659 additions and 1382 deletions
40
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
|
@ -42,9 +42,12 @@ type Error interface {
|
|||
OrigErr() error
|
||||
}
|
||||
|
||||
// BatchError is a batch of errors which also wraps lower level errors with code, message,
|
||||
// and original errors. Calling Error() will only return the error that is at the end
|
||||
// of the list.
|
||||
// BatchError is a batch of errors which also wraps lower level errors with
|
||||
// code, message, and original errors. Calling Error() will include all errors
|
||||
// that occurred in the batch.
|
||||
//
|
||||
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
|
||||
// compatibility.
|
||||
type BatchError interface {
|
||||
// Satisfy the generic error interface.
|
||||
error
|
||||
|
@ -59,20 +62,35 @@ type BatchError interface {
|
|||
OrigErrs() []error
|
||||
}
|
||||
|
||||
// BatchedErrors is a batch of errors which also wraps lower level errors with
|
||||
// code, message, and original errors. Calling Error() will include all errors
|
||||
// that occurred in the batch.
|
||||
//
|
||||
// Replaces BatchError
|
||||
type BatchedErrors interface {
|
||||
// Satisfy the base Error interface.
|
||||
Error
|
||||
|
||||
// Returns the original error if one was set. Nil is returned if not set.
|
||||
OrigErrs() []error
|
||||
}
|
||||
|
||||
// New returns an Error object described by the code, message, and origErr.
|
||||
//
|
||||
// If origErr satisfies the Error interface it will not be wrapped within a new
|
||||
// Error object and will instead be returned.
|
||||
func New(code, message string, origErr error) Error {
|
||||
if e, ok := origErr.(Error); ok && e != nil {
|
||||
return e
|
||||
var errs []error
|
||||
if origErr != nil {
|
||||
errs = append(errs, origErr)
|
||||
}
|
||||
return newBaseError(code, message, origErr)
|
||||
return newBaseError(code, message, errs)
|
||||
}
|
||||
|
||||
// NewBatchError returns an baseError with an expectation of an array of errors
|
||||
func NewBatchError(code, message string, errs []error) BatchError {
|
||||
return newBaseErrors(code, message, errs)
|
||||
// NewBatchError returns an BatchedErrors with a collection of errors as an
|
||||
// array of errors.
|
||||
func NewBatchError(code, message string, errs []error) BatchedErrors {
|
||||
return newBaseError(code, message, errs)
|
||||
}
|
||||
|
||||
// A RequestFailure is an interface to extract request failure information from
|
||||
|
@ -85,9 +103,9 @@ func NewBatchError(code, message string, errs []error) BatchError {
|
|||
// output, err := s3manage.Upload(svc, input, opts)
|
||||
// if err != nil {
|
||||
// if reqerr, ok := err.(RequestFailure); ok {
|
||||
// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
|
||||
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
|
||||
// } else {
|
||||
// log.Printf("Error:", err.Error()
|
||||
// log.Println("Error:", err.Error())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
|
|
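The rewritten error.go replaces BatchError with BatchedErrors and funnels both New and NewBatchError through a shared error slice. A minimal sketch of how callers see this API, with the wrapped errors invented for illustration:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a single underlying error with an AWS-style code and message.
	cause := errors.New("connection reset")
	err := awserr.New("RequestError", "send failed", cause)
	fmt.Println(err.Code(), err.Message(), err.OrigErr())

	// Collect several errors into a single BatchedErrors value.
	batch := awserr.NewBatchError("BatchedErrors", "multiple errors occurred",
		[]error{cause, errors.New("read timeout")})
	fmt.Println(len(batch.OrigErrs())) // 2
}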
67
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
|
@ -34,36 +34,17 @@ type baseError struct {
|
|||
errs []error
|
||||
}
|
||||
|
||||
// newBaseError returns an error object for the code, message, and err.
|
||||
// newBaseError returns an error object for the code, message, and errors.
|
||||
//
|
||||
// code is a short no whitespace phrase depicting the classification of
|
||||
// the error that is being created.
|
||||
//
|
||||
// message is the free flow string containing detailed information about the error.
|
||||
// message is the free flow string containing detailed information about the
|
||||
// error.
|
||||
//
|
||||
// origErr is the error object which will be nested under the new error to be returned.
|
||||
func newBaseError(code, message string, origErr error) *baseError {
|
||||
b := &baseError{
|
||||
code: code,
|
||||
message: message,
|
||||
}
|
||||
|
||||
if origErr != nil {
|
||||
b.errs = append(b.errs, origErr)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// newBaseErrors returns an error object for the code, message, and errors.
|
||||
//
|
||||
// code is a short no whitespace phrase depicting the classification of
|
||||
// the error that is being created.
|
||||
//
|
||||
// message is the free flow string containing detailed information about the error.
|
||||
//
|
||||
// origErrs is the error objects which will be nested under the new errors to be returned.
|
||||
func newBaseErrors(code, message string, origErrs []error) *baseError {
|
||||
// origErrs is the error objects which will be nested under the new errors to
|
||||
// be returned.
|
||||
func newBaseError(code, message string, origErrs []error) *baseError {
|
||||
b := &baseError{
|
||||
code: code,
|
||||
message: message,
|
||||
|
@ -103,19 +84,26 @@ func (b baseError) Message() string {
|
|||
return b.message
|
||||
}
|
||||
|
||||
// OrigErr returns the original error if one was set. Nil is returned if no error
|
||||
// was set. This only returns the first element in the list. If the full list is
|
||||
// needed, use BatchError
|
||||
// OrigErr returns the original error if one was set. Nil is returned if no
|
||||
// error was set. This only returns the first element in the list. If the full
|
||||
// list is needed, use BatchedErrors.
|
||||
func (b baseError) OrigErr() error {
|
||||
if size := len(b.errs); size > 0 {
|
||||
switch len(b.errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return b.errs[0]
|
||||
default:
|
||||
if err, ok := b.errs[0].(Error); ok {
|
||||
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
|
||||
}
|
||||
return NewBatchError("BatchedErrors",
|
||||
"multiple errors occured", b.errs)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OrigErrs returns the original errors if one was set. An empty slice is returned if
|
||||
// no error was set:w
|
||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
||||
// returned if no error was set.
|
||||
func (b baseError) OrigErrs() []error {
|
||||
return b.errs
|
||||
}
|
||||
|
@ -133,8 +121,8 @@ type requestError struct {
|
|||
requestID string
|
||||
}
|
||||
|
||||
// newRequestError returns a wrapped error with additional information for request
|
||||
// status code, and service requestID.
|
||||
// newRequestError returns a wrapped error with additional information for
|
||||
// request status code, and service requestID.
|
||||
//
|
||||
// Should be used to wrap all request which involve service requests. Even if
|
||||
// the request failed without a service response, but had an HTTP status code
|
||||
|
@ -173,6 +161,15 @@ func (r requestError) RequestID() string {
|
|||
return r.requestID
|
||||
}
|
||||
|
||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
||||
// returned if no error was set.
|
||||
func (r requestError) OrigErrs() []error {
|
||||
if b, ok := r.awsError.(BatchedErrors); ok {
|
||||
return b.OrigErrs()
|
||||
}
|
||||
return []error{r.OrigErr()}
|
||||
}
|
||||
|
||||
// An error list that satisfies the golang interface
|
||||
type errorList []error
|
||||
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
|
@ -91,6 +91,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
|||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
default:
|
||||
if !v.IsValid() {
|
||||
fmt.Fprint(buf, "<invalid value>")
|
||||
return
|
||||
}
|
||||
format := "%v"
|
||||
switch v.Interface().(type) {
|
||||
case string:
|
||||
|
|
53
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
|
@ -1,8 +1,8 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
|
@ -30,16 +30,61 @@ func (d DefaultRetryer) MaxRetries() int {
|
|||
return d.NumMaxRetries
|
||||
}
|
||||
|
||||
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again
|
||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||
delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
|
||||
// Set the upper limit of delay in retrying at ~five minutes
|
||||
minTime := 30
|
||||
throttle := d.shouldThrottle(r)
|
||||
if throttle {
|
||||
minTime = 500
|
||||
}
|
||||
|
||||
retryCount := r.RetryCount
|
||||
if retryCount > 13 {
|
||||
retryCount = 13
|
||||
} else if throttle && retryCount > 8 {
|
||||
retryCount = 8
|
||||
}
|
||||
|
||||
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
}
|
||||
|
||||
// ShouldRetry returns if the request should be retried.
|
||||
// ShouldRetry returns true if the request should be retried.
|
||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||
if r.HTTPResponse.StatusCode >= 500 {
|
||||
return true
|
||||
}
|
||||
return r.IsErrorRetryable()
|
||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||
}
|
||||
|
||||
// ShouldThrottle returns true if the request should be throttled.
|
||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||
if r.HTTPResponse.StatusCode == 502 ||
|
||||
r.HTTPResponse.StatusCode == 503 ||
|
||||
r.HTTPResponse.StatusCode == 504 {
|
||||
return true
|
||||
}
|
||||
return r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
// lockedSource is a thread-safe implementation of rand.Source
|
||||
type lockedSource struct {
|
||||
lk sync.Mutex
|
||||
src rand.Source
|
||||
}
|
||||
|
||||
func (r *lockedSource) Int63() (n int64) {
|
||||
r.lk.Lock()
|
||||
n = r.src.Int63()
|
||||
r.lk.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *lockedSource) Seed(seed int64) {
|
||||
r.lk.Lock()
|
||||
r.src.Seed(seed)
|
||||
r.lk.Unlock()
|
||||
}
|
||||
|
|
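The new RetryRules computes delays from a capped exponent and a mutex-guarded rand source. A rough standalone sketch of the same delay window, reusing the constants from the diff (30ms base, 500ms when throttled, exponent capped at 13 or 8); this is not the SDK's code itself:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay mirrors the diff's logic: base window of 30ms (500ms when
// throttled), exponent capped at 13 (8 when throttled).
func retryDelay(retryCount int, throttle bool) time.Duration {
	minTime := 30
	if throttle {
		minTime = 500
	}
	if retryCount > 13 {
		retryCount = 13
	} else if throttle && retryCount > 8 {
		retryCount = 8
	}
	delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, retryDelay(i, false), retryDelay(i, true))
	}
}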
54
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
|
@ -100,6 +100,31 @@ type Config struct {
|
|||
// Amazon S3: Virtual Hosting of Buckets
|
||||
S3ForcePathStyle *bool
|
||||
|
||||
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
|
||||
// header to PUT requests over 2MB of content. 100-Continue instructs the
|
||||
// HTTP client not to send the body until the service responds with a
|
||||
// `continue` status. This is useful to prevent sending the request body
|
||||
// until after the request is authenticated, and validated.
|
||||
//
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
||||
//
|
||||
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
|
||||
// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
|
||||
// https://golang.org/pkg/net/http/#Transport
|
||||
//
|
||||
// You should use this flag to disable 100-Continue if you experience issues
|
||||
// with proxies or third party S3 compatible services.
|
||||
S3Disable100Continue *bool
|
||||
|
||||
// Set this to `true` to enable S3 Accelerate feature. For all operations compatible
|
||||
// with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible
|
||||
// will fall back to normal S3 requests.
|
||||
//
|
||||
// The bucket must be enabled for accelerate to be used with the S3 client with accelerate
|
||||
// enabled. If the bucket is not enabled for accelerate an error will be returned.
|
||||
// The bucket name must be DNS compatible to also work with accelerate.
|
||||
S3UseAccelerate *bool
|
||||
|
||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||
// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
|
||||
// client to create a new http.Client. This options is only meaningful if you're not
|
||||
|
@ -114,13 +139,18 @@ type Config struct {
|
|||
//
|
||||
EC2MetadataDisableTimeoutOverride *bool
|
||||
|
||||
// SleepDelay is an override for the func the SDK will call when sleeping
|
||||
// during the lifecycle of a request. Specifically this will be used for
|
||||
// request delays. This value should only be used for testing. To adjust
|
||||
// the delay of a request see the aws/client.DefaultRetryer and
|
||||
// aws/request.Retryer.
|
||||
SleepDelay func(time.Duration)
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder methods to
|
||||
// set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
|
||||
// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
|
||||
//
|
||||
func NewConfig() *Config {
|
||||
return &Config{}
|
||||
|
@ -210,6 +240,20 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
|||
return c
|
||||
}
|
||||
|
||||
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
||||
// a Config pointer for chaining.
|
||||
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
||||
c.S3Disable100Continue = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||
c.S3UseAccelerate = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||
|
@ -288,6 +332,14 @@ func mergeInConfig(dst *Config, other *Config) {
|
|||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||
}
|
||||
|
||||
if other.S3Disable100Continue != nil {
|
||||
dst.S3Disable100Continue = other.S3Disable100Continue
|
||||
}
|
||||
|
||||
if other.S3UseAccelerate != nil {
|
||||
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||
}
|
||||
|
||||
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||
}
|
||||
|
|
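The added S3Disable100Continue and S3UseAccelerate fields come with builder-style setters, so they chain like the existing options. A hedged example of building an S3 client with them; the region is a placeholder and the service/s3 and session packages are assumed to be the usual companions of this SDK version:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// newS3Client builds an S3 client with 100-Continue disabled and Transfer
// Acceleration enabled, using the new chained setters.
func newS3Client() *s3.S3 {
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithS3Disable100Continue(true).
		WithS3UseAccelerate(true)
	return s3.New(session.New(cfg))
}

func main() {
	_ = newS3Client()
}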
24
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
generated
vendored
|
@ -2,7 +2,7 @@ package aws
|
|||
|
||||
import "time"
|
||||
|
||||
// String returns a pointer to of the string value passed in.
|
||||
// String returns a pointer to the string value passed in.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Bool returns a pointer to of the bool value passed in.
|
||||
// Bool returns a pointer to the bool value passed in.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Int returns a pointer to of the int value passed in.
|
||||
// Int returns a pointer to the int value passed in.
|
||||
func Int(v int) *int {
|
||||
return &v
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to of the int64 value passed in.
|
||||
// Int64 returns a pointer to the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to of the float64 value passed in.
|
||||
// Float64 returns a pointer to the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Time returns a pointer to of the time.Time value passed in.
|
||||
// Time returns a pointer to the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time {
|
|||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||
// Which includes calling TimeUnixMilli on a zero Time is undefined.
|
||||
//
|
||||
// This utility is useful for service API's such as CloudWatch Logs which require
|
||||
// their unix time values to be in milliseconds.
|
||||
//
|
||||
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
|
||||
func TimeUnixMilli(t time.Time) int64 {
|
||||
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
|
|
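TimeUnixMilli is a small helper for APIs such as CloudWatch Logs that want millisecond timestamps. A quick illustration of the conversion (the date is arbitrary):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	t := time.Date(2016, time.May, 1, 0, 0, 0, 0, time.UTC)
	// Millisecond Unix timestamp: t.UnixNano() / 1e6.
	fmt.Println(aws.TimeUnixMilli(t)) // 1462060800000
}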
53
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
|
@ -24,30 +24,38 @@ type lener interface {
|
|||
// BuildContentLengthHandler builds the content length of a request based on the body,
|
||||
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
|
||||
// to determine request body length and no "Content-Length" was specified it will panic.
|
||||
//
|
||||
// The Content-Length will only be added to the request if the length of the body
|
||||
// is greater than 0. If the body is empty or the current `Content-Length`
|
||||
// header is <= 0, the header will also be stripped.
|
||||
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
|
||||
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||
length, _ := strconv.ParseInt(slength, 10, 64)
|
||||
r.HTTPRequest.ContentLength = length
|
||||
return
|
||||
}
|
||||
|
||||
var length int64
|
||||
switch body := r.Body.(type) {
|
||||
case nil:
|
||||
length = 0
|
||||
case lener:
|
||||
length = int64(body.Len())
|
||||
case io.Seeker:
|
||||
r.BodyStart, _ = body.Seek(0, 1)
|
||||
end, _ := body.Seek(0, 2)
|
||||
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
|
||||
length = end - r.BodyStart
|
||||
default:
|
||||
panic("Cannot get length of body, must provide `ContentLength`")
|
||||
|
||||
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||
length, _ = strconv.ParseInt(slength, 10, 64)
|
||||
} else {
|
||||
switch body := r.Body.(type) {
|
||||
case nil:
|
||||
length = 0
|
||||
case lener:
|
||||
length = int64(body.Len())
|
||||
case io.Seeker:
|
||||
r.BodyStart, _ = body.Seek(0, 1)
|
||||
end, _ := body.Seek(0, 2)
|
||||
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
|
||||
length = end - r.BodyStart
|
||||
default:
|
||||
panic("Cannot get length of body, must provide `ContentLength`")
|
||||
}
|
||||
}
|
||||
|
||||
r.HTTPRequest.ContentLength = length
|
||||
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||
if length > 0 {
|
||||
r.HTTPRequest.ContentLength = length
|
||||
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||
} else {
|
||||
r.HTTPRequest.ContentLength = 0
|
||||
r.HTTPRequest.Header.Del("Content-Length")
|
||||
}
|
||||
}}
|
||||
|
||||
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
|
||||
|
@ -64,6 +72,11 @@ var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *req
|
|||
var err error
|
||||
r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
|
||||
if err != nil {
|
||||
// Prevent leaking if an HTTPResponse was returned. Clean up
|
||||
// the body.
|
||||
if r.HTTPResponse != nil {
|
||||
r.HTTPResponse.Body.Close()
|
||||
}
|
||||
// Capture the case where url.Error is returned for error processing
|
||||
// response. e.g. 301 without location header comes back as string
|
||||
// error and r.HTTPResponse is nil. Other url redirect errors will
|
||||
|
|
139
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
generated
vendored
|
@ -1,144 +1,17 @@
|
|||
package corehandlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
import "github.com/aws/aws-sdk-go/aws/request"
|
||||
|
||||
// ValidateParametersHandler is a request handler to validate the input parameters.
|
||||
// Validating parameters only has meaning if done prior to the request being sent.
|
||||
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
|
||||
if r.ParamsFilled() {
|
||||
v := validator{errors: []string{}}
|
||||
v.validateAny(reflect.ValueOf(r.Params), "")
|
||||
|
||||
if count := len(v.errors); count > 0 {
|
||||
format := "%d validation errors:\n- %s"
|
||||
msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
|
||||
r.Error = awserr.New("InvalidParameter", msg, nil)
|
||||
}
|
||||
}
|
||||
}}
|
||||
|
||||
// A validator validates values. Collects validations errors which occurs.
|
||||
type validator struct {
|
||||
errors []string
|
||||
}
|
||||
|
||||
// validateAny will validate any struct, slice or map type. All validations
|
||||
// are also performed recursively for nested types.
|
||||
func (v *validator) validateAny(value reflect.Value, path string) {
|
||||
value = reflect.Indirect(value)
|
||||
if !value.IsValid() {
|
||||
if !r.ParamsFilled() {
|
||||
return
|
||||
}
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
v.validateStruct(value, path)
|
||||
case reflect.Slice:
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
case reflect.Map:
|
||||
for _, n := range value.MapKeys() {
|
||||
v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
|
||||
if v, ok := r.Params.(request.Validator); ok {
|
||||
if err := v.Validate(); err != nil {
|
||||
r.Error = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validateStruct will validate the struct value's fields. If the structure has
|
||||
// nested types those types will be validated also.
|
||||
func (v *validator) validateStruct(value reflect.Value, path string) {
|
||||
prefix := "."
|
||||
if path == "" {
|
||||
prefix = ""
|
||||
}
|
||||
|
||||
for i := 0; i < value.Type().NumField(); i++ {
|
||||
f := value.Type().Field(i)
|
||||
if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
|
||||
continue
|
||||
}
|
||||
fvalue := value.FieldByName(f.Name)
|
||||
|
||||
err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
|
||||
if err != nil {
|
||||
v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
v.validateAny(fvalue, path+prefix+f.Name)
|
||||
}
|
||||
}
|
||||
|
||||
type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
|
||||
|
||||
func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
|
||||
for _, fn := range funcs {
|
||||
if err := fn(f, fvalue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validates that a field has a valid value provided for required fields.
|
||||
func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
|
||||
if f.Tag.Get("required") == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch fvalue.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.Map:
|
||||
if fvalue.IsNil() {
|
||||
return fmt.Errorf("missing required parameter")
|
||||
}
|
||||
default:
|
||||
if !fvalue.IsValid() {
|
||||
return fmt.Errorf("missing required parameter")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validates that if a value is provided for a field, that value must be at
|
||||
// least a minimum length.
|
||||
func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
|
||||
minStr := f.Tag.Get("min")
|
||||
if minStr == "" {
|
||||
return nil
|
||||
}
|
||||
min, _ := strconv.ParseInt(minStr, 10, 64)
|
||||
|
||||
kind := fvalue.Kind()
|
||||
if kind == reflect.Ptr {
|
||||
if fvalue.IsNil() {
|
||||
return nil
|
||||
}
|
||||
fvalue = fvalue.Elem()
|
||||
}
|
||||
|
||||
switch fvalue.Kind() {
|
||||
case reflect.String:
|
||||
if int64(fvalue.Len()) < min {
|
||||
return fmt.Errorf("field too short, minimum length %d", min)
|
||||
}
|
||||
case reflect.Slice, reflect.Map:
|
||||
if fvalue.IsNil() {
|
||||
return nil
|
||||
}
|
||||
if int64(fvalue.Len()) < min {
|
||||
return fmt.Errorf("field too short, minimum length %d", min)
|
||||
}
|
||||
|
||||
// TODO min can also apply to number minimum value.
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}}
|
||||
|
|
8
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
|
@ -132,7 +132,7 @@ const iamSecurityCredsPath = "/iam/security-credentials"
|
|||
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||
resp, err := client.GetMetadata(iamSecurityCredsPath)
|
||||
if err != nil {
|
||||
return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
|
||||
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
|
||||
}
|
||||
|
||||
credsList := []string{}
|
||||
|
@ -142,7 +142,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
|||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
|
||||
return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
|
||||
}
|
||||
|
||||
return credsList, nil
|
||||
|
@ -157,7 +157,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
|
|||
if err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("EC2RoleRequestError",
|
||||
fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
|
||||
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
|
||||
|
@ -165,7 +165,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
|
|||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("SerializationError",
|
||||
fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
|
||||
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
|
@ -14,7 +14,7 @@ var (
|
|||
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||
)
|
||||
|
||||
// A StaticProvider is a set of credentials which are set pragmatically,
|
||||
// A StaticProvider is a set of credentials which are set programmatically,
|
||||
// and will never expire.
|
||||
type StaticProvider struct {
|
||||
Value
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
|
@ -66,7 +66,9 @@ func Handlers() request.Handlers {
|
|||
var handlers request.Handlers
|
||||
|
||||
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
|
||||
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
|
||||
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
|
||||
handlers.Build.AfterEachFn = request.HandlerListStopOnError
|
||||
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
handlers.Send.PushBackNamed(corehandlers.SendHandler)
|
||||
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
||||
|
|
99
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
|
@ -1,12 +1,19 @@
|
|||
package ec2metadata
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// GetMetadata uses the path provided to request
|
||||
// GetMetadata uses the path provided to request information from the EC2
|
||||
// instance metadata service. The content will be returned as a string, or
|
||||
// error if the request failed.
|
||||
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetMetadata",
|
||||
|
@ -20,6 +27,68 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
|||
return output.Content, req.Send()
|
||||
}
|
||||
|
||||
// GetDynamicData uses the path provided to request information from the EC2
|
||||
// instance metadata service for dynamic data. The content will be returned
|
||||
// as a string, or error if the request failed.
|
||||
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetDynamicData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: path.Join("/", "dynamic", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
|
||||
return output.Content, req.Send()
|
||||
}
|
||||
|
||||
// GetInstanceIdentityDocument retrieves an identity document describing an
|
||||
// instance. Error is returned if the request fails or is unable to parse
|
||||
// the response.
|
||||
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
|
||||
resp, err := c.GetDynamicData("instance-identity/document")
|
||||
if err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
doc := EC2InstanceIdentityDocument{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New("SerializationError",
|
||||
"failed to decode EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
return doc, nil
|
||||
}
|
||||
|
||||
// IAMInfo retrieves IAM info from the metadata API
|
||||
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
|
||||
resp, err := c.GetMetadata("iam/info")
|
||||
if err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 IAM info", err)
|
||||
}
|
||||
|
||||
info := EC2IAMInfo{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("SerializationError",
|
||||
"failed to decode EC2 IAM info", err)
|
||||
}
|
||||
|
||||
if info.Code != "Success" {
|
||||
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataError", errMsg, nil)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Region returns the region the instance is running in.
|
||||
func (c *EC2Metadata) Region() (string, error) {
|
||||
resp, err := c.GetMetadata("placement/availability-zone")
|
||||
|
@ -41,3 +110,31 @@ func (c *EC2Metadata) Available() bool {
|
|||
|
||||
return true
|
||||
}
|
||||
|
||||
// An EC2IAMInfo provides the shape for unmarshalling
|
||||
// an IAM info from the metadata API
|
||||
type EC2IAMInfo struct {
|
||||
Code string
|
||||
LastUpdated time.Time
|
||||
InstanceProfileArn string
|
||||
InstanceProfileID string
|
||||
}
|
||||
|
||||
// An EC2InstanceIdentityDocument provides the shape for unmarshalling
|
||||
// an instance identity document
|
||||
type EC2InstanceIdentityDocument struct {
|
||||
DevpayProductCodes []string `json:"devpayProductCodes"`
|
||||
AvailabilityZone string `json:"availabilityZone"`
|
||||
PrivateIP string `json:"privateIp"`
|
||||
Version string `json:"version"`
|
||||
Region string `json:"region"`
|
||||
InstanceID string `json:"instanceId"`
|
||||
BillingProducts []string `json:"billingProducts"`
|
||||
InstanceType string `json:"instanceType"`
|
||||
AccountID string `json:"accountId"`
|
||||
PendingTime time.Time `json:"pendingTime"`
|
||||
ImageID string `json:"imageId"`
|
||||
KernelID string `json:"kernelId"`
|
||||
RamdiskID string `json:"ramdiskId"`
|
||||
Architecture string `json:"architecture"`
|
||||
}
|
||||
|
|
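GetInstanceIdentityDocument and IAMInfo are thin wrappers over GetDynamicData and GetMetadata. A sketch of typical use on an instance; ec2metadata.New and session.New are assumed to be the constructors from this SDK version, and the printed fields depend on where it runs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	meta := ec2metadata.New(session.New())
	if !meta.Available() {
		fmt.Println("not running on EC2")
		return
	}
	doc, err := meta.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("identity document:", err)
		return
	}
	fmt.Println(doc.Region, doc.InstanceID, doc.InstanceType)
}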
23
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
|
@ -3,7 +3,9 @@
|
|||
package ec2metadata
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
|
@ -91,23 +93,28 @@ type metadataOutput struct {
|
|||
|
||||
func unmarshalHandler(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
|
||||
return
|
||||
}
|
||||
|
||||
data := r.Data.(*metadataOutput)
|
||||
data.Content = string(b)
|
||||
if data, ok := r.Data.(*metadataOutput); ok {
|
||||
data.Content = b.String()
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
_, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO extract the error...
|
||||
// Response body format is not consistent between metadata endpoints.
|
||||
// Grab the error message as a string and include that as the source error
|
||||
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
|
||||
}
|
||||
|
||||
func validateEndpointHandler(r *request.Request) {
|
||||
|
|
14
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
|
@ -79,6 +79,20 @@ type Logger interface {
|
|||
Log(...interface{})
|
||||
}
|
||||
|
||||
// A LoggerFunc is a convenience type to convert a function taking a variadic
|
||||
// list of arguments and wrap it so the Logger interface can be used.
|
||||
//
|
||||
// Example:
|
||||
// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
|
||||
// fmt.Fprintln(os.Stdout, args...)
|
||||
// })})
|
||||
type LoggerFunc func(...interface{})
|
||||
|
||||
// Log calls the wrapped function with the arguments provided
|
||||
func (f LoggerFunc) Log(args ...interface{}) {
|
||||
f(args...)
|
||||
}
|
||||
|
||||
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
|
||||
// use same formatting runes as the stdlib log.Logger
|
||||
func NewDefaultLogger() Logger {
|
||||
|
|
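LoggerFunc lets any func(...interface{}) satisfy the Logger interface. A short example that routes SDK debug output to the standard library logger; WithLogLevel, WithLogger, and the LogDebugWithHTTPBody constant are assumed from the existing Config API:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	cfg := aws.NewConfig().
		WithLogLevel(aws.LogDebugWithHTTPBody).
		WithLogger(aws.LoggerFunc(func(args ...interface{}) {
			// Forward SDK log lines to the standard library logger.
			log.Println(args...)
		}))
	_ = session.New(cfg)
}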
53
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
|
@ -50,9 +50,28 @@ func (h *Handlers) Clear() {
|
|||
h.AfterRetry.Clear()
|
||||
}
|
||||
|
||||
// A HandlerListRunItem represents an entry in the HandlerList which
|
||||
// is being run.
|
||||
type HandlerListRunItem struct {
|
||||
Index int
|
||||
Handler NamedHandler
|
||||
Request *Request
|
||||
}
|
||||
|
||||
// A HandlerList manages zero or more handlers in a list.
|
||||
type HandlerList struct {
|
||||
list []NamedHandler
|
||||
|
||||
// Called after each request handler in the list is called. If set
|
||||
// and the func returns true the HandlerList will continue to iterate
|
||||
// over the request handlers. If false is returned the HandlerList
|
||||
// will stop iterating.
|
||||
//
|
||||
// Should be used if extra logic to be performed between each handler
|
||||
// in the list. This can be used to terminate a list's iteration
|
||||
// based on a condition such as error like, HandlerListStopOnError.
|
||||
// Or for logging like HandlerListLogItem.
|
||||
AfterEachFn func(item HandlerListRunItem) bool
|
||||
}
|
||||
|
||||
// A NamedHandler is a struct that contains a name and function callback.
|
||||
|
@ -63,7 +82,9 @@ type NamedHandler struct {
|
|||
|
||||
// copy creates a copy of the handler list.
|
||||
func (l *HandlerList) copy() HandlerList {
|
||||
var n HandlerList
|
||||
n := HandlerList{
|
||||
AfterEachFn: l.AfterEachFn,
|
||||
}
|
||||
n.list = append([]NamedHandler{}, l.list...)
|
||||
return n
|
||||
}
|
||||
|
@ -111,11 +132,37 @@ func (l *HandlerList) Remove(n NamedHandler) {
|
|||
|
||||
// Run executes all handlers in the list with a given request object.
|
||||
func (l *HandlerList) Run(r *Request) {
|
||||
for _, f := range l.list {
|
||||
f.Fn(r)
|
||||
for i, h := range l.list {
|
||||
h.Fn(r)
|
||||
item := HandlerListRunItem{
|
||||
Index: i, Handler: h, Request: r,
|
||||
}
|
||||
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HandlerListLogItem logs the request handler and the state of the
|
||||
// request's Error value. Always returns true to continue iterating
|
||||
// request handlers in a HandlerList.
|
||||
func HandlerListLogItem(item HandlerListRunItem) bool {
|
||||
if item.Request.Config.Logger == nil {
|
||||
return true
|
||||
}
|
||||
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
|
||||
item.Index, item.Handler.Name, item.Request.Error)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// HandlerListStopOnError returns false to stop the HandlerList iterating
|
||||
// over request handlers if Request.Error is not nil. True otherwise
|
||||
// to continue iterating.
|
||||
func HandlerListStopOnError(item HandlerListRunItem) bool {
|
||||
return item.Request.Error == nil
|
||||
}
|
||||
|
||||
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
||||
// header. If the extra parameters are provided they will be added as metadata to the
|
||||
// name/version pair resulting in the following format.
|
||||
|
|
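AfterEachFn turns HandlerList.Run into an early-exit loop, and HandlerListStopOnError is the stock predicate wired up in defaults.go above. A stripped-down illustration of the same pattern outside the SDK types (reqState and the handlers are invented for the example):

package main

import (
	"errors"
	"fmt"
)

// reqState is a stand-in for *request.Request in this illustration.
type reqState struct{ Err error }

type handler func(*reqState)

// run mimics HandlerList.Run with an AfterEachFn hook: iteration stops as
// soon as the predicate returns false (here, once an error is recorded,
// matching HandlerListStopOnError).
func run(hs []handler, afterEach func(index int, r *reqState) bool) {
	r := &reqState{}
	for i, h := range hs {
		h(r)
		if afterEach != nil && !afterEach(i, r) {
			return
		}
	}
}

func main() {
	run([]handler{
		func(r *reqState) { fmt.Println("validate") },
		func(r *reqState) { r.Err = errors.New("missing required field") },
		func(r *reqState) { fmt.Println("never reached") },
	}, func(_ int, r *reqState) bool { return r.Err == nil })
}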
33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
// +build go1.5
|
||||
|
||||
package request
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
|
||||
req := &http.Request{
|
||||
URL: &url.URL{},
|
||||
Header: http.Header{},
|
||||
Close: r.Close,
|
||||
Body: body,
|
||||
Host: r.Host,
|
||||
Method: r.Method,
|
||||
Proto: r.Proto,
|
||||
ContentLength: r.ContentLength,
|
||||
// Cancel will be deprecated in 1.7 and will be replaced with Context
|
||||
Cancel: r.Cancel,
|
||||
}
|
||||
|
||||
*req.URL = *r.URL
|
||||
for k, v := range r.Header {
|
||||
for _, vv := range v {
|
||||
req.Header.Add(k, vv)
|
||||
}
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
// +build !go1.5
|
||||
|
||||
package request
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
|
||||
req := &http.Request{
|
||||
URL: &url.URL{},
|
||||
Header: http.Header{},
|
||||
Close: r.Close,
|
||||
Body: body,
|
||||
Host: r.Host,
|
||||
Method: r.Method,
|
||||
Proto: r.Proto,
|
||||
ContentLength: r.ContentLength,
|
||||
}
|
||||
|
||||
*req.URL = *r.URL
|
||||
for k, v := range r.Header {
|
||||
for _, vv := range v {
|
||||
req.Header.Add(k, vv)
|
||||
}
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
package request
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// offsetReader is a thread-safe io.ReadCloser to prevent racing
|
||||
// with retrying requests
|
||||
type offsetReader struct {
|
||||
buf io.ReadSeeker
|
||||
lock sync.RWMutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
|
||||
reader := &offsetReader{}
|
||||
buf.Seek(offset, 0)
|
||||
|
||||
reader.buf = buf
|
||||
return reader
|
||||
}
|
||||
|
||||
// Close is a thread-safe close. Uses the write lock.
|
||||
func (o *offsetReader) Close() error {
|
||||
o.lock.Lock()
|
||||
defer o.lock.Unlock()
|
||||
o.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read is a thread-safe read using a read lock.
|
||||
func (o *offsetReader) Read(p []byte) (int, error) {
|
||||
o.lock.RLock()
|
||||
defer o.lock.RUnlock()
|
||||
|
||||
if o.closed {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
return o.buf.Read(p)
|
||||
}
|
||||
|
||||
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
|
||||
// and close the old buffer.
|
||||
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
|
||||
o.Close()
|
||||
return newOffsetReader(o.buf, offset)
|
||||
}
|
64
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
|
@ -12,6 +12,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
)
|
||||
|
||||
|
@ -38,6 +39,7 @@ type Request struct {
|
|||
RetryDelay time.Duration
|
||||
NotHoist bool
|
||||
SignedHeaderVals http.Header
|
||||
LastSignedAt time.Time
|
||||
|
||||
built bool
|
||||
}
|
||||
|
@ -77,7 +79,13 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
|||
}
|
||||
|
||||
httpReq, _ := http.NewRequest(method, "", nil)
|
||||
httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
|
||||
|
||||
var err error
|
||||
httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
|
||||
if err != nil {
|
||||
httpReq.URL = &url.URL{}
|
||||
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
|
||||
}
|
||||
|
||||
r := &Request{
|
||||
Config: cfg,
|
||||
|
@ -91,7 +99,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
|||
HTTPRequest: httpReq,
|
||||
Body: nil,
|
||||
Params: params,
|
||||
Error: nil,
|
||||
Error: err,
|
||||
Data: data,
|
||||
}
|
||||
r.SetBufferBody([]byte{})
|
||||
|
@ -131,7 +139,7 @@ func (r *Request) SetStringBody(s string) {
|
|||
|
||||
// SetReaderBody will set the request's body reader.
|
||||
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||
r.HTTPRequest.Body = ioutil.NopCloser(reader)
|
||||
r.HTTPRequest.Body = newOffsetReader(reader, 0)
|
||||
r.Body = reader
|
||||
}
|
||||
|
||||
|
@ -185,20 +193,23 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
|||
// which occurred will be returned.
|
||||
func (r *Request) Build() error {
|
||||
if !r.built {
|
||||
r.Error = nil
|
||||
r.Handlers.Validate.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Validate Request", false, r.Error)
|
||||
return r.Error
|
||||
}
|
||||
r.Handlers.Build.Run(r)
|
||||
if r.Error != nil {
|
||||
debugLogReqError(r, "Build Request", false, r.Error)
|
||||
return r.Error
|
||||
}
|
||||
r.built = true
|
||||
}
|
||||
|
||||
return r.Error
|
||||
}
|
||||
|
||||
// Sign will sign the request retuning error if errors are encountered.
|
||||
// Sign will sign the request returning error if errors are encountered.
|
||||
//
|
||||
// Send will build the request prior to signing. All Sign Handlers will
|
||||
// be executed in the order they were set.
|
||||
|
@ -217,28 +228,53 @@ func (r *Request) Sign() error {
|
|||
//
|
||||
// Send will sign the request prior to sending. All Send Handlers will
|
||||
// be executed in the order they were set.
|
||||
//
|
||||
// Canceling a request is non-deterministic. If a request has been canceled,
|
||||
// then the transport will choose, randomly, one of the state channels during
|
||||
// reads or getting the connection.
|
||||
//
|
||||
// readLoop() and getConn(req *Request, cm connectMethod)
|
||||
// https://github.com/golang/go/blob/master/src/net/http/transport.go
|
||||
func (r *Request) Send() error {
|
||||
for {
|
||||
r.Sign()
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
if aws.BoolValue(r.Retryable) {
|
||||
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
||||
}
|
||||
|
||||
// Re-seek the body back to the original point in for a retry so that
|
||||
// send will send the body's contents again in the upcoming request.
|
||||
r.Body.Seek(r.BodyStart, 0)
|
||||
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
|
||||
var body io.ReadCloser
|
||||
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
|
||||
body = reader.CloseAndCopy(r.BodyStart)
|
||||
} else {
|
||||
if r.Config.Logger != nil {
|
||||
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
|
||||
}
|
||||
r.Body.Seek(r.BodyStart, 0)
|
||||
body = ioutil.NopCloser(r.Body)
|
||||
}
|
||||
|
||||
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
|
||||
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
||||
// Closing response body. Since we are setting a new request to send off, this
|
||||
// response will get squashed and leaked.
|
||||
r.HTTPResponse.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
r.Sign()
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
r.Retryable = nil
|
||||
|
||||
r.Handlers.Send.Run(r)
|
||||
if r.Error != nil {
|
||||
if strings.Contains(r.Error.Error(), "net/http: request canceled") {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
err := r.Error
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
|
|
23
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
|
@ -26,8 +26,11 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
|||
// retryableCodes is a collection of service response codes which are retry-able
|
||||
// without any further action.
|
||||
var retryableCodes = map[string]struct{}{
|
||||
"RequestError": {},
|
||||
"RequestTimeout": {},
|
||||
"RequestError": {},
|
||||
"RequestTimeout": {},
|
||||
}
|
||||
|
||||
var throttleCodes = map[string]struct{}{
|
||||
"ProvisionedThroughputExceededException": {},
|
||||
"Throttling": {},
|
||||
"ThrottlingException": {},
|
||||
|
@ -46,6 +49,11 @@ var credsExpiredCodes = map[string]struct{}{
|
|||
"RequestExpired": {}, // EC2 Only
|
||||
}
|
||||
|
||||
func isCodeThrottle(code string) bool {
|
||||
_, ok := throttleCodes[code]
|
||||
return ok
|
||||
}
|
||||
|
||||
func isCodeRetryable(code string) bool {
|
||||
if _, ok := retryableCodes[code]; ok {
|
||||
return true
|
||||
|
@ -70,6 +78,17 @@ func (r *Request) IsErrorRetryable() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||
// Returns false if the request has no Error set
|
||||
func (r *Request) IsErrorThrottle() bool {
|
||||
if r.Error != nil {
|
||||
if err, ok := r.Error.(awserr.Error); ok {
|
||||
return isCodeThrottle(err.Code())
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorExpired returns whether the error code is a credential expiry error.
|
||||
// Returns false if the request has no Error set.
|
||||
func (r *Request) IsErrorExpired() bool {
|
||||
|
|
234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
Normal file
|
@ -0,0 +1,234 @@
|
|||
package request
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
const (
|
||||
// InvalidParameterErrCode is the error code for invalid parameters errors
|
||||
InvalidParameterErrCode = "InvalidParameter"
|
||||
// ParamRequiredErrCode is the error code for required parameter errors
|
||||
ParamRequiredErrCode = "ParamRequiredError"
|
||||
// ParamMinValueErrCode is the error code for fields with too low of a
|
||||
// number value.
|
||||
ParamMinValueErrCode = "ParamMinValueError"
|
||||
// ParamMinLenErrCode is the error code for fields without enough elements.
|
||||
ParamMinLenErrCode = "ParamMinLenError"
|
||||
)
|
||||
|
||||
// Validator provides a way for types to perform validation logic on their
|
||||
// input values that external code can use to determine if a type's values
|
||||
// are valid.
|
||||
type Validator interface {
|
||||
Validate() error
|
||||
}
|
||||
|
||||
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
|
||||
// validating API operation input parameters.
|
||||
type ErrInvalidParams struct {
|
||||
// Context is the base context of the invalid parameter group.
|
||||
Context string
|
||||
errs []ErrInvalidParam
|
||||
}
|
||||
|
||||
// Add adds a new invalid parameter error to the collection of invalid
|
||||
// parameters. The context of the invalid parameter will be updated to reflect
|
||||
// this collection.
|
||||
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
|
||||
err.SetContext(e.Context)
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
|
||||
// AddNested adds the invalid parameter errors from another ErrInvalidParams
|
||||
// value into this collection. The nested errors will have their nested context
|
||||
// updated and base context to reflect the merging.
|
||||
//
|
||||
// Use for nested validations errors.
|
||||
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
|
||||
for _, err := range nested.errs {
|
||||
err.SetContext(e.Context)
|
||||
err.AddNestedContext(nestedCtx)
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of invalid parameter errors
|
||||
func (e ErrInvalidParams) Len() int {
|
||||
return len(e.errs)
|
||||
}
|
||||
|
||||
// Code returns the code of the error
|
||||
func (e ErrInvalidParams) Code() string {
|
||||
return InvalidParameterErrCode
|
||||
}
|
||||
|
||||
// Message returns the message of the error
|
||||
func (e ErrInvalidParams) Message() string {
|
||||
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
|
||||
}
|
||||
|
||||
// Error returns the string formatted form of the invalid parameters.
|
||||
func (e ErrInvalidParams) Error() string {
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
|
||||
|
||||
for _, err := range e.errs {
|
||||
fmt.Fprintf(w, "- %s\n", err.Message())
|
||||
}
|
||||
|
||||
return w.String()
|
||||
}
|
||||
|
||||
// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
|
||||
func (e ErrInvalidParams) OrigErr() error {
|
||||
return awserr.NewBatchError(
|
||||
InvalidParameterErrCode, e.Message(), e.OrigErrs())
|
||||
}
|
||||
|
||||
// OrigErrs returns a slice of the invalid parameters
|
||||
func (e ErrInvalidParams) OrigErrs() []error {
|
||||
errs := make([]error, len(e.errs))
|
||||
for i := 0; i < len(errs); i++ {
|
||||
errs[i] = e.errs[i]
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// An ErrInvalidParam represents an invalid parameter error type.
|
||||
type ErrInvalidParam interface {
|
||||
awserr.Error
|
||||
|
||||
// Field name the error occurred on.
|
||||
Field() string
|
||||
|
||||
// SetContext updates the context of the error.
|
||||
SetContext(string)
|
||||
|
||||
// AddNestedContext updates the error's context to include a nested level.
|
||||
AddNestedContext(string)
|
||||
}
|
||||
|
||||
type errInvalidParam struct {
|
||||
context string
|
||||
nestedContext string
|
||||
field string
|
||||
code string
|
||||
msg string
|
||||
}
|
||||
|
||||
// Code returns the error code for the type of invalid parameter.
|
||||
func (e *errInvalidParam) Code() string {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// Message returns the reason the parameter was invalid, and its context.
|
||||
func (e *errInvalidParam) Message() string {
|
||||
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
|
||||
}
|
||||
|
||||
// Error returns the string version of the invalid parameter error.
|
||||
func (e *errInvalidParam) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.code, e.Message())
|
||||
}
|
||||
|
||||
// OrigErr returns nil, Implemented for awserr.Error interface.
|
||||
func (e *errInvalidParam) OrigErr() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Field Returns the field and context the error occurred.
|
||||
func (e *errInvalidParam) Field() string {
|
||||
field := e.context
|
||||
if len(field) > 0 {
|
||||
field += "."
|
||||
}
|
||||
if len(e.nestedContext) > 0 {
|
||||
field += fmt.Sprintf("%s.", e.nestedContext)
|
||||
}
|
||||
field += e.field
|
||||
|
||||
return field
|
||||
}
|
||||
|
||||
// SetContext updates the base context of the error.
|
||||
func (e *errInvalidParam) SetContext(ctx string) {
|
||||
e.context = ctx
|
||||
}
|
||||
|
||||
// AddNestedContext prepends a context to the field's path.
|
||||
func (e *errInvalidParam) AddNestedContext(ctx string) {
|
||||
if len(e.nestedContext) == 0 {
|
||||
e.nestedContext = ctx
|
||||
} else {
|
||||
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// An ErrParamRequired represents an required parameter error.
|
||||
type ErrParamRequired struct {
|
||||
errInvalidParam
|
||||
}
|
||||
|
||||
// NewErrParamRequired creates a new required parameter error.
|
||||
func NewErrParamRequired(field string) *ErrParamRequired {
|
||||
return &ErrParamRequired{
|
||||
errInvalidParam{
|
||||
code: ParamRequiredErrCode,
|
||||
field: field,
|
||||
msg: fmt.Sprintf("missing required field"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// An ErrParamMinValue represents a minimum value parameter error.
|
||||
type ErrParamMinValue struct {
|
||||
errInvalidParam
|
||||
min float64
|
||||
}
|
||||
|
||||
// NewErrParamMinValue creates a new minimum value parameter error.
|
||||
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
|
||||
return &ErrParamMinValue{
|
||||
errInvalidParam: errInvalidParam{
|
||||
code: ParamMinValueErrCode,
|
||||
field: field,
|
||||
msg: fmt.Sprintf("minimum field value of %v", min),
|
||||
},
|
||||
min: min,
|
||||
}
|
||||
}
|
||||
|
||||
// MinValue returns the field's required minimum value.
|
||||
//
|
||||
// float64 is returned for both int and float min values.
|
||||
func (e *ErrParamMinValue) MinValue() float64 {
|
||||
return e.min
|
||||
}
|
||||
|
||||
// An ErrParamMinLen represents a minimum length parameter error.
|
||||
type ErrParamMinLen struct {
|
||||
errInvalidParam
|
||||
min int
|
||||
}
|
||||
|
||||
// NewErrParamMinLen creates a new minimum length parameter error.
|
||||
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
|
||||
return &ErrParamMinLen{
|
||||
errInvalidParam: errInvalidParam{
|
||||
code: ParamMinValueErrCode,
|
||||
field: field,
|
||||
msg: fmt.Sprintf("minimum field size of %v", min),
|
||||
},
|
||||
min: min,
|
||||
}
|
||||
}
|
||||
|
||||
// MinLen returns the field's required minimum length.
|
||||
func (e *ErrParamMinLen) MinLen() int {
|
||||
return e.min
|
||||
}
|
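A minimal usage sketch of the parameter-error constructors above (not part of the vendored diff; the field and type names are illustrative, and the printed error-code strings come from constants defined earlier in this file):

	req := NewErrParamRequired("Bucket")
	req.SetContext("PutObjectInput")
	// Field() joins context, nested context, and field name.
	fmt.Println(req.Field()) // "PutObjectInput.Bucket"

	minLen := NewErrParamMinLen("Key", 1)
	minLen.SetContext("PutObjectInput")
	minLen.AddNestedContext("Objects[0]")
	fmt.Println(minLen.Field()) // "PutObjectInput.Objects[0].Key"
	fmt.Println(minLen.Error()) // "<code>: minimum field size of 1, PutObjectInput.Objects[0].Key."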
2
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
|
@ -86,7 +86,7 @@ func initHandlers(s *Session) {
|
|||
//
|
||||
// Example:
|
||||
// // Create a copy of the current session, configured for the us-west-2 region.
|
||||
// sess.Copy(&aws.Config{Region: aws.String("us-west-2"})
|
||||
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
|
||||
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
||||
newSession := &Session{
|
||||
Config: s.Config.Copy(cfgs...),
|
||||
|
|
644
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
Normal file
|
@ -0,0 +1,644 @@
|
|||
// Package v4 implements signing for AWS V4 signer
|
||||
//
|
||||
// Provides request signing for requests that need to be signed with
|
||||
// AWS V4 Signatures.
|
||||
package v4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||
timeFormat = "20060102T150405Z"
|
||||
shortTimeFormat = "20060102"
|
||||
|
||||
// emptyStringSHA256 is a SHA256 of an empty string
|
||||
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
|
||||
)
|
||||
|
||||
var ignoredHeaders = rules{
|
||||
blacklist{
|
||||
mapRule{
|
||||
"Authorization": struct{}{},
|
||||
"User-Agent": struct{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// requiredSignedHeaders is a whitelist for building canonical headers.
|
||||
var requiredSignedHeaders = rules{
|
||||
whitelist{
|
||||
mapRule{
|
||||
"Cache-Control": struct{}{},
|
||||
"Content-Disposition": struct{}{},
|
||||
"Content-Encoding": struct{}{},
|
||||
"Content-Language": struct{}{},
|
||||
"Content-Md5": struct{}{},
|
||||
"Content-Type": struct{}{},
|
||||
"Expires": struct{}{},
|
||||
"If-Match": struct{}{},
|
||||
"If-Modified-Since": struct{}{},
|
||||
"If-None-Match": struct{}{},
|
||||
"If-Unmodified-Since": struct{}{},
|
||||
"Range": struct{}{},
|
||||
"X-Amz-Acl": struct{}{},
|
||||
"X-Amz-Copy-Source": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-Range": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Grant-Full-control": struct{}{},
|
||||
"X-Amz-Grant-Read": struct{}{},
|
||||
"X-Amz-Grant-Read-Acp": struct{}{},
|
||||
"X-Amz-Grant-Write": struct{}{},
|
||||
"X-Amz-Grant-Write-Acp": struct{}{},
|
||||
"X-Amz-Metadata-Directive": struct{}{},
|
||||
"X-Amz-Mfa": struct{}{},
|
||||
"X-Amz-Request-Payer": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Storage-Class": struct{}{},
|
||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||
},
|
||||
},
|
||||
patterns{"X-Amz-Meta-"},
|
||||
}
|
||||
|
||||
// allowedQueryHoisting is a whitelist of headers that may be hoisted to the
|
||||
// presigned request's query string.
|
||||
var allowedQueryHoisting = inclusiveRules{
|
||||
blacklist{requiredSignedHeaders},
|
||||
patterns{"X-Amz-"},
|
||||
}
|
||||
|
||||
// Signer applies AWS v4 signing to given request. Use this to sign requests
|
||||
// that need to be signed with AWS V4 Signatures.
|
||||
type Signer struct {
|
||||
// The authentication credentials the request will be signed against.
|
||||
// This value must be set to sign requests.
|
||||
Credentials *credentials.Credentials
|
||||
|
||||
// Sets the log level the signer should use when reporting information to
|
||||
// the logger. If the logger is nil nothing will be logged. See
|
||||
// aws.LogLevelType for more information on available logging levels
|
||||
//
|
||||
// By default nothing will be logged.
|
||||
Debug aws.LogLevelType
|
||||
|
||||
// The logger that logging information will be written to. If the logger
|
||||
// is nil, nothing will be logged.
|
||||
Logger aws.Logger
|
||||
|
||||
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
|
||||
// request header to the request's query string. This is most commonly used
|
||||
// with pre-signed requests preventing headers from being added to the
|
||||
// request's query string.
|
||||
DisableHeaderHoisting bool
|
||||
|
||||
// currentTimeFn returns the time value which represents the current time.
|
||||
// This value should only be used for testing. If it is nil the default
|
||||
// time.Now will be used.
|
||||
currentTimeFn func() time.Time
|
||||
}
|
||||
|
||||
// NewSigner returns a Signer pointer configured with the credentials and optional
|
||||
// option values provided. If no options are provided the Signer will use its
|
||||
// default configuration.
|
||||
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
||||
v4 := &Signer{
|
||||
Credentials: credentials,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(v4)
|
||||
}
|
||||
|
||||
return v4
|
||||
}
|
||||
|
||||
type signingCtx struct {
|
||||
ServiceName string
|
||||
Region string
|
||||
Request *http.Request
|
||||
Body io.ReadSeeker
|
||||
Query url.Values
|
||||
Time time.Time
|
||||
ExpireTime time.Duration
|
||||
SignedHeaderVals http.Header
|
||||
|
||||
credValues credentials.Value
|
||||
isPresign bool
|
||||
formattedTime string
|
||||
formattedShortTime string
|
||||
|
||||
bodyDigest string
|
||||
signedHeaders string
|
||||
canonicalHeaders string
|
||||
canonicalString string
|
||||
credentialString string
|
||||
stringToSign string
|
||||
signature string
|
||||
authorization string
|
||||
}
|
||||
|
||||
// Sign signs AWS v4 requests with the provided body, service name, region the
|
||||
// request is made to, and time the request is signed at. The signTime allows
|
||||
// you to specify that a request is signed for the future, and cannot be
|
||||
// used until then.
|
||||
//
|
||||
// Returns a list of HTTP headers that were included in the signature or an
|
||||
// error if signing the request failed. Generally for signed requests this value
|
||||
// is not needed as the full request context will be captured by the http.Request
|
||||
// value. It is included for reference though.
|
||||
//
|
||||
// Sign differs from Presign in that it will sign the request using HTTP
|
||||
// header values. This type of signing is intended for http.Request values that
|
||||
// will not be shared, or are shared in a way the header values on the request
|
||||
// will not be lost.
|
||||
//
|
||||
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||
// generated. To bypass the signer computing the hash you can set the
|
||||
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||
// only compute the hash if the request header value is empty.
|
||||
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
|
||||
return v4.signWithBody(r, body, service, region, 0, signTime)
|
||||
}
|
||||
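A hedged usage sketch for the new Signer (not part of the vendored source; the endpoint, credentials, and imports are illustrative):

	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	body := strings.NewReader("{}")
	req, _ := http.NewRequest("POST", "https://dynamodb.us-west-2.amazonaws.com/", body)

	// Sign adds the X-Amz-Date and Authorization headers to req in place.
	if _, err := signer.Sign(req, body, "dynamodb", "us-west-2", time.Now()); err != nil {
		log.Fatal(err)
	}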
|
||||
// Presign signs AWS v4 requests with the provided body, service name, region
|
||||
// the request is made to, and time the request is signed at. The signTime
|
||||
// allows you to specify that a request is signed for the future, and cannot
|
||||
// be used until then.
|
||||
//
|
||||
// Returns a list of HTTP headers that were included in the signature or an
|
||||
// error if signing the request failed. For presigned requests these headers
|
||||
// and their values must be included on the HTTP request when it is made. This
|
||||
// is helpful to know what header values need to be shared with the party the
|
||||
// presigned request will be distributed to.
|
||||
//
|
||||
// Presign differs from Sign in that it will sign the request using query string
|
||||
// instead of header values. This allows you to share the Presigned Request's
|
||||
// URL with third parties, or distribute it throughout your system with minimal
|
||||
// dependencies.
|
||||
//
|
||||
// Presign also takes an exp value which is the duration the
|
||||
// signed request will be valid after the signing time. This allows you to
|
||||
// set when the request will expire.
|
||||
//
|
||||
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||
// generated. To bypass the signer computing the hash you can set the
|
||||
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||
// only compute the hash if the request header value is empty.
|
||||
//
|
||||
// Presigning an S3 request will not compute the body's SHA256 hash by default.
|
||||
// This is because the general use case for S3 presigned URLs is to share
|
||||
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
|
||||
// presigned request's signature you can set the "X-Amz-Content-Sha256"
|
||||
// HTTP header and that will be included in the request's signature.
|
||||
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||
return v4.signWithBody(r, body, service, region, exp, signTime)
|
||||
}
|
||||
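And a corresponding presigning sketch (again illustrative, not part of the diff); passing a nil body is fine here because S3 presigned requests default to an unsigned payload, as buildBodyDigest below shows:

	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/object-key", nil)

	// Presign places the signature in the query string; the URL can then be
	// shared and stays valid for 15 minutes from the signing time.
	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.URL.String()) // includes X-Amz-Signature, X-Amz-Expires, ...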
|
||||
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||
currentTimeFn := v4.currentTimeFn
|
||||
if currentTimeFn == nil {
|
||||
currentTimeFn = time.Now
|
||||
}
|
||||
|
||||
ctx := &signingCtx{
|
||||
Request: r,
|
||||
Body: body,
|
||||
Query: r.URL.Query(),
|
||||
Time: signTime,
|
||||
ExpireTime: exp,
|
||||
isPresign: exp != 0,
|
||||
ServiceName: service,
|
||||
Region: region,
|
||||
}
|
||||
|
||||
if ctx.isRequestSigned() {
|
||||
if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
|
||||
// If the request is already signed, and the credentials have not
|
||||
// expired, and the request is not too old ignore the signing request.
|
||||
return ctx.SignedHeaderVals, nil
|
||||
}
|
||||
ctx.Time = currentTimeFn()
|
||||
ctx.handlePresignRemoval()
|
||||
}
|
||||
|
||||
var err error
|
||||
ctx.credValues, err = v4.Credentials.Get()
|
||||
if err != nil {
|
||||
return http.Header{}, err
|
||||
}
|
||||
|
||||
ctx.assignAmzQueryValues()
|
||||
ctx.build(v4.DisableHeaderHoisting)
|
||||
|
||||
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
||||
v4.logSigningInfo(ctx)
|
||||
}
|
||||
|
||||
return ctx.SignedHeaderVals, nil
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) handlePresignRemoval() {
|
||||
if !ctx.isPresign {
|
||||
return
|
||||
}
|
||||
|
||||
// The credentials have expired for this request. The current signing
|
||||
// is invalid, and needs to be redone because the request will fail.
|
||||
ctx.removePresign()
|
||||
|
||||
// Update the request's query string to ensure the values stay in
|
||||
// sync in the case retrieving the new credentials fails.
|
||||
ctx.Request.URL.RawQuery = ctx.Query.Encode()
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) assignAmzQueryValues() {
|
||||
if ctx.isPresign {
|
||||
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||
if ctx.credValues.SessionToken != "" {
|
||||
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||
} else {
|
||||
ctx.Query.Del("X-Amz-Security-Token")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if ctx.credValues.SessionToken != "" {
|
||||
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||
}
|
||||
}
|
||||
|
||||
// SignRequestHandler is a named request handler the SDK will use to sign
|
||||
// service client requests with the V4 signature.
|
||||
var SignRequestHandler = request.NamedHandler{
|
||||
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
|
||||
}
|
||||
|
||||
// SignSDKRequest signs an AWS request with the V4 signature. This
|
||||
// request handler is best used only with the SDK's built-in service clients'
|
||||
// API operation requests.
|
||||
//
|
||||
// This function should not be used on its own, but in conjunction with
|
||||
// an AWS service client's API operation call. To sign a standalone request
|
||||
// not created by a service client's API operation method use the "Sign" or
|
||||
// "Presign" functions of the "Signer" type.
|
||||
//
|
||||
// If the credentials of the request's config are set to
|
||||
// credentials.AnonymousCredentials the request will not be signed.
|
||||
func SignSDKRequest(req *request.Request) {
|
||||
signSDKRequestWithCurrTime(req, time.Now)
|
||||
}
|
||||
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
|
||||
// If the request does not need to be signed ignore the signing of the
|
||||
// request if the AnonymousCredentials object is used.
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
region := req.ClientInfo.SigningRegion
|
||||
if region == "" {
|
||||
region = aws.StringValue(req.Config.Region)
|
||||
}
|
||||
|
||||
name := req.ClientInfo.SigningName
|
||||
if name == "" {
|
||||
name = req.ClientInfo.ServiceName
|
||||
}
|
||||
|
||||
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
|
||||
v4.Debug = req.Config.LogLevel.Value()
|
||||
v4.Logger = req.Config.Logger
|
||||
v4.DisableHeaderHoisting = req.NotHoist
|
||||
v4.currentTimeFn = curTimeFn
|
||||
})
|
||||
|
||||
signingTime := req.Time
|
||||
if !req.LastSignedAt.IsZero() {
|
||||
signingTime = req.LastSignedAt
|
||||
}
|
||||
|
||||
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
|
||||
if err != nil {
|
||||
req.Error = err
|
||||
req.SignedHeaderVals = nil
|
||||
return
|
||||
}
|
||||
|
||||
req.SignedHeaderVals = signedHeaders
|
||||
req.LastSignedAt = curTimeFn()
|
||||
}
|
||||
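For context, a hedged sketch of how a service client ends up using this handler (the s3 client and session names are illustrative; the SDK's generated clients wire this up automatically):

	sess := session.New()
	svc := s3.New(sess)

	// Replace whatever signer is registered with the V4 named handler.
	svc.Handlers.Sign.Clear()
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)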
|
||||
const logSignInfoMsg = `DEBUG: Request Signature:
|
||||
---[ CANONICAL STRING ]-----------------------------
|
||||
%s
|
||||
---[ STRING TO SIGN ]--------------------------------
|
||||
%s%s
|
||||
-----------------------------------------------------`
|
||||
const logSignedURLMsg = `
|
||||
---[ SIGNED URL ]------------------------------------
|
||||
%s`
|
||||
|
||||
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
||||
signedURLMsg := ""
|
||||
if ctx.isPresign {
|
||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
|
||||
}
|
||||
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
|
||||
v4.Logger.Log(msg)
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
|
||||
ctx.buildTime() // no depends
|
||||
ctx.buildCredentialString() // no depends
|
||||
|
||||
unsignedHeaders := ctx.Request.Header
|
||||
if ctx.isPresign {
|
||||
if !disableHeaderHoisting {
|
||||
urlValues := url.Values{}
|
||||
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
|
||||
for k := range urlValues {
|
||||
ctx.Query[k] = urlValues[k]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ctx.buildBodyDigest()
|
||||
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
|
||||
ctx.buildCanonicalString() // depends on canon headers / signed headers
|
||||
ctx.buildStringToSign() // depends on canon string
|
||||
ctx.buildSignature() // depends on string to sign
|
||||
|
||||
if ctx.isPresign {
|
||||
ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
|
||||
} else {
|
||||
parts := []string{
|
||||
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
|
||||
"SignedHeaders=" + ctx.signedHeaders,
|
||||
"Signature=" + ctx.signature,
|
||||
}
|
||||
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildTime() {
|
||||
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
|
||||
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
|
||||
|
||||
if ctx.isPresign {
|
||||
duration := int64(ctx.ExpireTime / time.Second)
|
||||
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
|
||||
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||
} else {
|
||||
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildCredentialString() {
|
||||
ctx.credentialString = strings.Join([]string{
|
||||
ctx.formattedShortTime,
|
||||
ctx.Region,
|
||||
ctx.ServiceName,
|
||||
"aws4_request",
|
||||
}, "/")
|
||||
|
||||
if ctx.isPresign {
|
||||
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
|
||||
}
|
||||
}
|
||||
|
||||
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
||||
query := url.Values{}
|
||||
unsignedHeaders := http.Header{}
|
||||
for k, h := range header {
|
||||
if r.IsValid(k) {
|
||||
query[k] = h
|
||||
} else {
|
||||
unsignedHeaders[k] = h
|
||||
}
|
||||
}
|
||||
|
||||
return query, unsignedHeaders
|
||||
}
|
||||
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
|
||||
var headers []string
|
||||
headers = append(headers, "host")
|
||||
for k, v := range header {
|
||||
canonicalKey := http.CanonicalHeaderKey(k)
|
||||
if !r.IsValid(canonicalKey) {
|
||||
continue // ignored header
|
||||
}
|
||||
if ctx.SignedHeaderVals == nil {
|
||||
ctx.SignedHeaderVals = make(http.Header)
|
||||
}
|
||||
|
||||
lowerCaseKey := strings.ToLower(k)
|
||||
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
|
||||
// include additional values
|
||||
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
|
||||
continue
|
||||
}
|
||||
|
||||
headers = append(headers, lowerCaseKey)
|
||||
ctx.SignedHeaderVals[lowerCaseKey] = v
|
||||
}
|
||||
sort.Strings(headers)
|
||||
|
||||
ctx.signedHeaders = strings.Join(headers, ";")
|
||||
|
||||
if ctx.isPresign {
|
||||
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
|
||||
}
|
||||
|
||||
headerValues := make([]string, len(headers))
|
||||
for i, k := range headers {
|
||||
if k == "host" {
|
||||
headerValues[i] = "host:" + ctx.Request.URL.Host
|
||||
} else {
|
||||
headerValues[i] = k + ":" +
|
||||
strings.Join(ctx.SignedHeaderVals[k], ",")
|
||||
}
|
||||
}
|
||||
|
||||
ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildCanonicalString() {
|
||||
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
|
||||
uri := ctx.Request.URL.Opaque
|
||||
if uri != "" {
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
} else {
|
||||
uri = ctx.Request.URL.Path
|
||||
}
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
if ctx.ServiceName != "s3" {
|
||||
uri = rest.EscapePath(uri, false)
|
||||
}
|
||||
|
||||
ctx.canonicalString = strings.Join([]string{
|
||||
ctx.Request.Method,
|
||||
uri,
|
||||
ctx.Request.URL.RawQuery,
|
||||
ctx.canonicalHeaders + "\n",
|
||||
ctx.signedHeaders,
|
||||
ctx.bodyDigest,
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildStringToSign() {
|
||||
ctx.stringToSign = strings.Join([]string{
|
||||
authHeaderPrefix,
|
||||
ctx.formattedTime,
|
||||
ctx.credentialString,
|
||||
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildSignature() {
|
||||
secret := ctx.credValues.SecretAccessKey
|
||||
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
|
||||
region := makeHmac(date, []byte(ctx.Region))
|
||||
service := makeHmac(region, []byte(ctx.ServiceName))
|
||||
credentials := makeHmac(service, []byte("aws4_request"))
|
||||
signature := makeHmac(credentials, []byte(ctx.stringToSign))
|
||||
ctx.signature = hex.EncodeToString(signature)
|
||||
}
|
||||
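The HMAC chain above is the standard SigV4 signing-key derivation. Pulled out as a stand-alone helper purely for illustration (the function name is assumed, not part of the diff), reusing the makeHmac helper defined later in this file:

	// deriveSigningKey mirrors buildSignature: each HMAC folds the next
	// credential-scope component into the key derived from the secret key.
	func deriveSigningKey(secret, shortDate, region, service string) []byte {
		kDate := makeHmac([]byte("AWS4"+secret), []byte(shortDate))
		kRegion := makeHmac(kDate, []byte(region))
		kService := makeHmac(kRegion, []byte(service))
		return makeHmac(kService, []byte("aws4_request"))
	}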
|
||||
func (ctx *signingCtx) buildBodyDigest() {
|
||||
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
if hash == "" {
|
||||
if ctx.isPresign && ctx.ServiceName == "s3" {
|
||||
hash = "UNSIGNED-PAYLOAD"
|
||||
} else if ctx.Body == nil {
|
||||
hash = emptyStringSHA256
|
||||
} else {
|
||||
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
|
||||
}
|
||||
if ctx.ServiceName == "s3" {
|
||||
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
|
||||
}
|
||||
}
|
||||
ctx.bodyDigest = hash
|
||||
}
|
||||
|
||||
// isRequestSigned returns if the request is currently signed or presigned
|
||||
func (ctx *signingCtx) isRequestSigned() bool {
|
||||
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
||||
return true
|
||||
}
|
||||
if ctx.Request.Header.Get("Authorization") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// removePresign removes signing flags for both signed and presigned requests.
|
||||
func (ctx *signingCtx) removePresign() {
|
||||
ctx.Query.Del("X-Amz-Algorithm")
|
||||
ctx.Query.Del("X-Amz-Signature")
|
||||
ctx.Query.Del("X-Amz-Security-Token")
|
||||
ctx.Query.Del("X-Amz-Date")
|
||||
ctx.Query.Del("X-Amz-Expires")
|
||||
ctx.Query.Del("X-Amz-Credential")
|
||||
ctx.Query.Del("X-Amz-SignedHeaders")
|
||||
}
|
||||
|
||||
func makeHmac(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256(data []byte) []byte {
|
||||
hash := sha256.New()
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||
hash := sha256.New()
|
||||
start, _ := reader.Seek(0, 1)
|
||||
defer reader.Seek(start, 0)
|
||||
|
||||
io.Copy(hash, reader)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
const doubleSpaces = "  "
|
||||
|
||||
var doubleSpaceBytes = []byte(doubleSpaces)
|
||||
|
||||
func stripExcessSpaces(headerVals []string) []string {
|
||||
vals := make([]string, len(headerVals))
|
||||
for i, str := range headerVals {
|
||||
// Trim leading and trailing spaces
|
||||
trimmed := strings.TrimSpace(str)
|
||||
|
||||
idx := strings.Index(trimmed, doubleSpaces)
|
||||
var buf []byte
|
||||
for idx > -1 {
|
||||
// Multiple adjacent spaces found
|
||||
if buf == nil {
|
||||
// first time create the buffer
|
||||
buf = []byte(trimmed)
|
||||
}
|
||||
|
||||
stripToIdx := -1
|
||||
for j := idx + 1; j < len(buf); j++ {
|
||||
if buf[j] != ' ' {
|
||||
buf = append(buf[:idx+1], buf[j:]...)
|
||||
stripToIdx = j
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if stripToIdx >= 0 {
|
||||
idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
|
||||
if idx >= 0 {
|
||||
idx += stripToIdx
|
||||
}
|
||||
} else {
|
||||
idx = -1
|
||||
}
|
||||
}
|
||||
|
||||
if buf != nil {
|
||||
vals[i] = string(buf)
|
||||
} else {
|
||||
vals[i] = trimmed
|
||||
}
|
||||
}
|
||||
return vals
|
||||
}
|
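For orientation (illustrative, not in the diff): stripExcessSpaces trims each value and collapses interior runs of spaces, which SigV4 requires for canonical headers.

	vals := stripExcessSpaces([]string{" a  b ", "c"})
	fmt.Println(vals) // prints [a b c], i.e. {"a b", "c"}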
30
vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
|
@ -61,23 +61,41 @@ func (r ReaderSeekerCloser) Close() error {
|
|||
type WriteAtBuffer struct {
|
||||
buf []byte
|
||||
m sync.Mutex
|
||||
|
||||
// GrowthCoeff defines the growth rate of the internal buffer. By
|
||||
// default, the growth rate is 1, where expanding the internal
|
||||
// buffer will allocate only enough capacity to fit the new expected
|
||||
// length.
|
||||
GrowthCoeff float64
|
||||
}
|
||||
|
||||
// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
|
||||
// provided by buf.
|
||||
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
|
||||
return &WriteAtBuffer{buf: buf}
|
||||
}
|
||||
|
||||
// WriteAt writes a slice of bytes to a buffer starting at the position provided.
|
||||
// The number of bytes written will be returned, or an error. Can overwrite previously
|
||||
// written slices if the writes overlap.
|
||||
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
|
||||
pLen := len(p)
|
||||
expLen := pos + int64(pLen)
|
||||
b.m.Lock()
|
||||
defer b.m.Unlock()
|
||||
|
||||
expLen := pos + int64(len(p))
|
||||
if int64(len(b.buf)) < expLen {
|
||||
newBuf := make([]byte, expLen)
|
||||
copy(newBuf, b.buf)
|
||||
b.buf = newBuf
|
||||
if int64(cap(b.buf)) < expLen {
|
||||
if b.GrowthCoeff < 1 {
|
||||
b.GrowthCoeff = 1
|
||||
}
|
||||
newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
|
||||
copy(newBuf, b.buf)
|
||||
b.buf = newBuf
|
||||
}
|
||||
b.buf = b.buf[:expLen]
|
||||
}
|
||||
copy(b.buf[pos:], p)
|
||||
return len(p), nil
|
||||
return pLen, nil
|
||||
}
|
||||
|
||||
// Bytes returns a slice of bytes written to the buffer.
|
||||
|
|
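A short usage sketch for the updated WriteAtBuffer (illustrative, not part of the diff); Bytes is the accessor referenced in the context line above:

	buf := aws.NewWriteAtBuffer(nil)
	buf.GrowthCoeff = 1.5 // over-allocate so sequential WriteAt calls reallocate less often

	// WriteAt is safe for concurrent use; overlapping writes overwrite each other.
	buf.WriteAt([]byte("world"), 6)
	buf.WriteAt([]byte("hello "), 0)

	fmt.Printf("%q\n", buf.Bytes()) // "hello world"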
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.1.0"
|
||||
const SDKVersion = "1.2.4"
|
||||
|
|
39
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
generated
vendored
|
@ -8,6 +8,9 @@
|
|||
"endpoint": "{service}.{region}.amazonaws.com.cn",
|
||||
"signatureVersion": "v4"
|
||||
},
|
||||
"cn-north-1/ec2metadata": {
|
||||
"endpoint": "http://169.254.169.254/latest"
|
||||
},
|
||||
"us-gov-west-1/iam": {
|
||||
"endpoint": "iam.us-gov.amazonaws.com"
|
||||
},
|
||||
|
@ -17,6 +20,9 @@
|
|||
"us-gov-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"us-gov-west-1/ec2metadata": {
|
||||
"endpoint": "http://169.254.169.254/latest"
|
||||
},
|
||||
"*/cloudfront": {
|
||||
"endpoint": "cloudfront.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
|
@ -30,8 +36,7 @@
|
|||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/ec2metadata": {
|
||||
"endpoint": "http://169.254.169.254/latest",
|
||||
"signingRegion": "us-east-1"
|
||||
"endpoint": "http://169.254.169.254/latest"
|
||||
},
|
||||
"*/iam": {
|
||||
"endpoint": "iam.amazonaws.com",
|
||||
|
@ -57,36 +62,14 @@
|
|||
"endpoint": "sdb.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"us-east-1/s3": {
|
||||
"endpoint": "s3.amazonaws.com"
|
||||
},
|
||||
"us-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"us-west-2/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"eu-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-southeast-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-southeast-2/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-northeast-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-northeast-2/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"sa-east-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"eu-central-1/s3": {
|
||||
"endpoint": "{service}.{region}.amazonaws.com",
|
||||
"signatureVersion": "v4"
|
||||
"endpoint": "{service}.{region}.amazonaws.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
36
vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
generated
vendored
|
@ -31,8 +31,7 @@ var endpointsMap = endpointStruct{
|
|||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/ec2metadata": {
|
||||
Endpoint: "http://169.254.169.254/latest",
|
||||
SigningRegion: "us-east-1",
|
||||
Endpoint: "http://169.254.169.254/latest",
|
||||
},
|
||||
"*/iam": {
|
||||
Endpoint: "iam.amazonaws.com",
|
||||
|
@ -46,6 +45,9 @@ var endpointsMap = endpointStruct{
|
|||
Endpoint: "route53.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"*/sts": {
|
||||
Endpoint: "sts.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
|
@ -54,30 +56,15 @@ var endpointsMap = endpointStruct{
|
|||
Endpoint: "waf.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"ap-northeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-northeast-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"cn-north-1/*": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com.cn",
|
||||
},
|
||||
"cn-north-1/ec2metadata": {
|
||||
Endpoint: "http://169.254.169.254/latest",
|
||||
},
|
||||
"eu-central-1/s3": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com",
|
||||
},
|
||||
"eu-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"sa-east-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-east-1/s3": {
|
||||
Endpoint: "s3.amazonaws.com",
|
||||
},
|
||||
|
@ -85,6 +72,9 @@ var endpointsMap = endpointStruct{
|
|||
Endpoint: "sdb.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"us-gov-west-1/ec2metadata": {
|
||||
Endpoint: "http://169.254.169.254/latest",
|
||||
},
|
||||
"us-gov-west-1/iam": {
|
||||
Endpoint: "iam.us-gov.amazonaws.com",
|
||||
},
|
||||
|
@ -94,11 +84,5 @@ var endpointsMap = endpointStruct{
|
|||
"us-gov-west-1/sts": {
|
||||
Endpoint: "sts.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
"us-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-west-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
38
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
generated
vendored
|
@ -2,7 +2,7 @@ package query
|
|||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
|
@ -15,6 +15,10 @@ type xmlErrorResponse struct {
|
|||
RequestID string `xml:"RequestId"`
|
||||
}
|
||||
|
||||
type xmlServiceUnavailableResponse struct {
|
||||
XMLName xml.Name `xml:"ServiceUnavailableException"`
|
||||
}
|
||||
|
||||
// UnmarshalErrorHandler is a named request handler to unmarshal request errors
|
||||
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
|
||||
|
||||
|
@ -22,11 +26,16 @@ var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalEr
|
|||
func UnmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
resp := &xmlErrorResponse{}
|
||||
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
|
||||
if err != nil && err != io.EOF {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
|
||||
} else {
|
||||
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
|
||||
return
|
||||
}
|
||||
|
||||
// First check for specific error
|
||||
resp := xmlErrorResponse{}
|
||||
decodeErr := xml.Unmarshal(bodyBytes, &resp)
|
||||
if decodeErr == nil {
|
||||
reqID := resp.RequestID
|
||||
if reqID == "" {
|
||||
reqID = r.RequestID
|
||||
|
@ -36,5 +45,22 @@ func UnmarshalError(r *request.Request) {
|
|||
r.HTTPResponse.StatusCode,
|
||||
reqID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Check for unhandled error
|
||||
servUnavailResp := xmlServiceUnavailableResponse{}
|
||||
unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
|
||||
if unavailErr == nil {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("ServiceUnavailableException", "service is unavailable", nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Failed to retrieve any error message from the response body
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to decode query XML error response", decodeErr)
|
||||
}
|
||||
|
|
3
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
generated
vendored
|
@ -222,8 +222,7 @@ func EscapePath(path string, encodeSep bool) string {
|
|||
if noEscape[c] || (c == '/' && !encodeSep) {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteByte('%')
|
||||
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
|
||||
fmt.Fprintf(&buf, "%%%02X", c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
|
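The switch to fmt.Fprintf matters for bytes below 0x10, which previously escaped to a single hex digit. An illustrative comparison (not part of the diff; assumes the usual imports):

	c := byte('\n')
	old := "%" + strings.ToUpper(strconv.FormatUint(uint64(c), 16)) // "%A"  — one digit, not valid percent-encoding
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%%%02X", c)                                  // "%0A" — zero-padded to two digits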
|
5
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
generated
vendored
|
@ -3,6 +3,7 @@ package rest
|
|||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
@ -51,6 +52,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
|
|||
if payload.IsValid() {
|
||||
switch payload.Interface().(type) {
|
||||
case []byte:
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
|
@ -58,6 +60,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
|
|||
payload.Set(reflect.ValueOf(b))
|
||||
}
|
||||
case *string:
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
|
@ -72,6 +75,8 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
|
|||
case "aws.ReadSeekCloser", "io.ReadCloser":
|
||||
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
|
||||
default:
|
||||
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to decode REST response",
|
||||
fmt.Errorf("unknown payload type %s", payload.Type()))
|
||||
|
|
438
vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
generated
vendored
|
@ -1,438 +0,0 @@
|
|||
// Package v4 implements signing for AWS V4 signer
|
||||
package v4
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||
timeFormat = "20060102T150405Z"
|
||||
shortTimeFormat = "20060102"
|
||||
)
|
||||
|
||||
var ignoredHeaders = rules{
|
||||
blacklist{
|
||||
mapRule{
|
||||
"Content-Length": struct{}{},
|
||||
"User-Agent": struct{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// requiredSignedHeaders is a whitelist for build canonical headers.
|
||||
var requiredSignedHeaders = rules{
|
||||
whitelist{
|
||||
mapRule{
|
||||
"Cache-Control": struct{}{},
|
||||
"Content-Disposition": struct{}{},
|
||||
"Content-Encoding": struct{}{},
|
||||
"Content-Language": struct{}{},
|
||||
"Content-Md5": struct{}{},
|
||||
"Content-Type": struct{}{},
|
||||
"Expires": struct{}{},
|
||||
"If-Match": struct{}{},
|
||||
"If-Modified-Since": struct{}{},
|
||||
"If-None-Match": struct{}{},
|
||||
"If-Unmodified-Since": struct{}{},
|
||||
"Range": struct{}{},
|
||||
"X-Amz-Acl": struct{}{},
|
||||
"X-Amz-Copy-Source": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||
"X-Amz-Copy-Source-Range": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Grant-Full-control": struct{}{},
|
||||
"X-Amz-Grant-Read": struct{}{},
|
||||
"X-Amz-Grant-Read-Acp": struct{}{},
|
||||
"X-Amz-Grant-Write": struct{}{},
|
||||
"X-Amz-Grant-Write-Acp": struct{}{},
|
||||
"X-Amz-Metadata-Directive": struct{}{},
|
||||
"X-Amz-Mfa": struct{}{},
|
||||
"X-Amz-Request-Payer": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||
"X-Amz-Storage-Class": struct{}{},
|
||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||
},
|
||||
},
|
||||
patterns{"X-Amz-Meta-"},
|
||||
}
|
||||
|
||||
// allowedHoisting is a whitelist for build query headers. The boolean value
|
||||
// represents whether or not it is a pattern.
|
||||
var allowedQueryHoisting = inclusiveRules{
|
||||
blacklist{requiredSignedHeaders},
|
||||
patterns{"X-Amz-"},
|
||||
}
|
||||
|
||||
type signer struct {
|
||||
Request *http.Request
|
||||
Time time.Time
|
||||
ExpireTime time.Duration
|
||||
ServiceName string
|
||||
Region string
|
||||
CredValues credentials.Value
|
||||
Credentials *credentials.Credentials
|
||||
Query url.Values
|
||||
Body io.ReadSeeker
|
||||
Debug aws.LogLevelType
|
||||
Logger aws.Logger
|
||||
|
||||
isPresign bool
|
||||
formattedTime string
|
||||
formattedShortTime string
|
||||
|
||||
signedHeaders string
|
||||
canonicalHeaders string
|
||||
canonicalString string
|
||||
credentialString string
|
||||
stringToSign string
|
||||
signature string
|
||||
authorization string
|
||||
notHoist bool
|
||||
signedHeaderVals http.Header
|
||||
}
|
||||
|
||||
// Sign requests with signature version 4.
|
||||
//
|
||||
// Will sign the requests with the service config's Credentials object
|
||||
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
|
||||
// object.
|
||||
func Sign(req *request.Request) {
|
||||
// If the request does not need to be signed ignore the signing of the
|
||||
// request if the AnonymousCredentials object is used.
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
region := req.ClientInfo.SigningRegion
|
||||
if region == "" {
|
||||
region = aws.StringValue(req.Config.Region)
|
||||
}
|
||||
|
||||
name := req.ClientInfo.SigningName
|
||||
if name == "" {
|
||||
name = req.ClientInfo.ServiceName
|
||||
}
|
||||
|
||||
s := signer{
|
||||
Request: req.HTTPRequest,
|
||||
Time: req.Time,
|
||||
ExpireTime: req.ExpireTime,
|
||||
Query: req.HTTPRequest.URL.Query(),
|
||||
Body: req.Body,
|
||||
ServiceName: name,
|
||||
Region: region,
|
||||
Credentials: req.Config.Credentials,
|
||||
Debug: req.Config.LogLevel.Value(),
|
||||
Logger: req.Config.Logger,
|
||||
notHoist: req.NotHoist,
|
||||
}
|
||||
|
||||
req.Error = s.sign()
|
||||
req.SignedHeaderVals = s.signedHeaderVals
|
||||
}
|
||||
|
||||
func (v4 *signer) sign() error {
|
||||
if v4.ExpireTime != 0 {
|
||||
v4.isPresign = true
|
||||
}
|
||||
|
||||
if v4.isRequestSigned() {
|
||||
if !v4.Credentials.IsExpired() {
|
||||
// If the request is already signed, and the credentials have not
|
||||
// expired yet ignore the signing request.
|
||||
return nil
|
||||
}
|
||||
|
||||
// The credentials have expired for this request. The current signing
|
||||
// is invalid, and needs to be redone because the request will fail.
|
||||
if v4.isPresign {
|
||||
v4.removePresign()
|
||||
// Update the request's query string to ensure the values stay in
|
||||
// sync in the case retrieving the new credentials fails.
|
||||
v4.Request.URL.RawQuery = v4.Query.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
v4.CredValues, err = v4.Credentials.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||
if v4.CredValues.SessionToken != "" {
|
||||
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
} else {
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
}
|
||||
} else if v4.CredValues.SessionToken != "" {
|
||||
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
}
|
||||
|
||||
v4.build()
|
||||
|
||||
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
||||
v4.logSigningInfo()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const logSignInfoMsg = `DEBUG: Request Signature:
|
||||
---[ CANONICAL STRING ]-----------------------------
|
||||
%s
|
||||
---[ STRING TO SIGN ]--------------------------------
|
||||
%s%s
|
||||
-----------------------------------------------------`
|
||||
const logSignedURLMsg = `
|
||||
---[ SIGNED URL ]------------------------------------
|
||||
%s`
|
||||
|
||||
func (v4 *signer) logSigningInfo() {
|
||||
signedURLMsg := ""
|
||||
if v4.isPresign {
|
||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
|
||||
}
|
||||
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
|
||||
v4.Logger.Log(msg)
|
||||
}
|
||||
|
||||
func (v4 *signer) build() {
|
||||
|
||||
v4.buildTime() // no depends
|
||||
v4.buildCredentialString() // no depends
|
||||
|
||||
unsignedHeaders := v4.Request.Header
|
||||
if v4.isPresign {
|
||||
if !v4.notHoist {
|
||||
urlValues := url.Values{}
|
||||
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
|
||||
for k := range urlValues {
|
||||
v4.Query[k] = urlValues[k]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
|
||||
v4.buildCanonicalString() // depends on canon headers / signed headers
|
||||
v4.buildStringToSign() // depends on canon string
|
||||
v4.buildSignature() // depends on string to sign
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
|
||||
} else {
|
||||
parts := []string{
|
||||
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
|
||||
"SignedHeaders=" + v4.signedHeaders,
|
||||
"Signature=" + v4.signature,
|
||||
}
|
||||
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildTime() {
|
||||
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
|
||||
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
|
||||
|
||||
if v4.isPresign {
|
||||
duration := int64(v4.ExpireTime / time.Second)
|
||||
v4.Query.Set("X-Amz-Date", v4.formattedTime)
|
||||
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||
} else {
|
||||
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCredentialString() {
|
||||
v4.credentialString = strings.Join([]string{
|
||||
v4.formattedShortTime,
|
||||
v4.Region,
|
||||
v4.ServiceName,
|
||||
"aws4_request",
|
||||
}, "/")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
|
||||
}
|
||||
}
|
||||
|
||||
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
||||
query := url.Values{}
|
||||
unsignedHeaders := http.Header{}
|
||||
for k, h := range header {
|
||||
if r.IsValid(k) {
|
||||
query[k] = h
|
||||
} else {
|
||||
unsignedHeaders[k] = h
|
||||
}
|
||||
}
|
||||
|
||||
return query, unsignedHeaders
|
||||
}
|
||||
func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
|
||||
var headers []string
|
||||
headers = append(headers, "host")
|
||||
for k, v := range header {
|
||||
canonicalKey := http.CanonicalHeaderKey(k)
|
||||
if !r.IsValid(canonicalKey) {
|
||||
continue // ignored header
|
||||
}
|
||||
|
||||
lowerCaseKey := strings.ToLower(k)
|
||||
headers = append(headers, lowerCaseKey)
|
||||
|
||||
if v4.signedHeaderVals == nil {
|
||||
v4.signedHeaderVals = make(http.Header)
|
||||
}
|
||||
v4.signedHeaderVals[lowerCaseKey] = v
|
||||
}
|
||||
sort.Strings(headers)
|
||||
|
||||
v4.signedHeaders = strings.Join(headers, ";")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
|
||||
}
|
||||
|
||||
headerValues := make([]string, len(headers))
|
||||
for i, k := range headers {
|
||||
if k == "host" {
|
||||
headerValues[i] = "host:" + v4.Request.URL.Host
|
||||
} else {
|
||||
headerValues[i] = k + ":" +
|
||||
strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
|
||||
}
|
||||
}
|
||||
|
||||
v4.canonicalHeaders = strings.Join(headerValues, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCanonicalString() {
|
||||
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
|
||||
uri := v4.Request.URL.Opaque
|
||||
if uri != "" {
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
} else {
|
||||
uri = v4.Request.URL.Path
|
||||
}
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
if v4.ServiceName != "s3" {
|
||||
uri = rest.EscapePath(uri, false)
|
||||
}
|
||||
|
||||
v4.canonicalString = strings.Join([]string{
|
||||
v4.Request.Method,
|
||||
uri,
|
||||
v4.Request.URL.RawQuery,
|
||||
v4.canonicalHeaders + "\n",
|
||||
v4.signedHeaders,
|
||||
v4.bodyDigest(),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildStringToSign() {
|
||||
v4.stringToSign = strings.Join([]string{
|
||||
authHeaderPrefix,
|
||||
v4.formattedTime,
|
||||
v4.credentialString,
|
||||
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildSignature() {
|
||||
secret := v4.CredValues.SecretAccessKey
|
||||
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
|
||||
region := makeHmac(date, []byte(v4.Region))
|
||||
service := makeHmac(region, []byte(v4.ServiceName))
|
||||
credentials := makeHmac(service, []byte("aws4_request"))
|
||||
signature := makeHmac(credentials, []byte(v4.stringToSign))
|
||||
v4.signature = hex.EncodeToString(signature)
|
||||
}
|
||||
|
||||
func (v4 *signer) bodyDigest() string {
|
||||
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
if hash == "" {
|
||||
if v4.isPresign && v4.ServiceName == "s3" {
|
||||
hash = "UNSIGNED-PAYLOAD"
|
||||
} else if v4.Body == nil {
|
||||
hash = hex.EncodeToString(makeSha256([]byte{}))
|
||||
} else {
|
||||
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
|
||||
}
|
||||
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// isRequestSigned returns if the request is currently signed or presigned
|
||||
func (v4 *signer) isRequestSigned() bool {
|
||||
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
|
||||
return true
|
||||
}
|
||||
if v4.Request.Header.Get("Authorization") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// unsign removes signing flags for both signed and presigned requests.
|
||||
func (v4 *signer) removePresign() {
|
||||
v4.Query.Del("X-Amz-Algorithm")
|
||||
v4.Query.Del("X-Amz-Signature")
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
v4.Query.Del("X-Amz-Date")
|
||||
v4.Query.Del("X-Amz-Expires")
|
||||
v4.Query.Del("X-Amz-Credential")
|
||||
v4.Query.Del("X-Amz-SignedHeaders")
|
||||
}
|
||||
|
||||
func makeHmac(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256(data []byte) []byte {
|
||||
hash := sha256.New()
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||
hash := sha256.New()
|
||||
start, _ := reader.Seek(0, 1)
|
||||
defer reader.Seek(start, 0)
|
||||
|
||||
io.Copy(hash, reader)
|
||||
return hash.Sum(nil)
|
||||
}
|
18
vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go
generated
vendored
|
@ -13,6 +13,7 @@ import (
|
|||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// An AWSEpochTime wraps a time value providing JSON serialization needed for
|
||||
|
@ -110,6 +111,12 @@ func (p *Policy) Validate() error {
|
|||
if s.Resource == "" {
|
||||
return fmt.Errorf("statement at index %d does not have a resource", i)
|
||||
}
|
||||
if !isASCII(s.Resource) {
|
||||
return fmt.Errorf("unable to sign resource, [%s]. "+
|
||||
"Resources must only contain ascii characters. "+
|
||||
"Hostnames with unicode should be encoded as Punycode, (e.g. golang.org/x/net/idna), "+
|
||||
"and URL unicode path/query characters should be escaped.", s.Resource)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -120,7 +127,7 @@ func (p *Policy) Validate() error {
|
|||
func CreateResource(scheme, u string) (string, error) {
|
||||
scheme = strings.ToLower(scheme)
|
||||
|
||||
if scheme == "http" || scheme == "https" {
|
||||
if scheme == "http" || scheme == "https" || scheme == "http*" || scheme == "*" {
|
||||
return u, nil
|
||||
}
|
||||
|
||||
|
@ -208,3 +215,12 @@ func awsEscapeEncoded(b []byte) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isASCII(u string) bool {
|
||||
for _, c := range u {
|
||||
if c > unicode.MaxASCII {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
241
vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
generated
vendored
Normal file
|
@ -0,0 +1,241 @@
|
|||
package sign
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// CookiePolicyName name of the policy cookie
|
||||
CookiePolicyName = "CloudFront-Policy"
|
||||
// CookieSignatureName name of the signature cookie
|
||||
CookieSignatureName = "CloudFront-Signature"
|
||||
// CookieKeyIDName name of the signing Key ID cookie
|
||||
CookieKeyIDName = "CloudFront-Key-Pair-Id"
|
||||
)
|
||||
|
||||
// A CookieOptions holds optional additional options that can be applied to the signed
|
||||
// cookies.
|
||||
type CookieOptions struct {
|
||||
Path string
|
||||
Domain string
|
||||
Secure bool
|
||||
}
|
||||
|
||||
// apply will integrate the options provided into the base cookie options;
|
||||
// a new copy will be returned. The base CookieOptions will not be modified.
|
||||
func (o CookieOptions) apply(opts ...func(*CookieOptions)) CookieOptions {
|
||||
if len(opts) == 0 {
|
||||
return o
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(&o)
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
// A CookieSigner provides signing utilities to sign Cookies for Amazon CloudFront
|
||||
// resources. Using a private key and Credential Key Pair key ID the CookieSigner
|
||||
// only needs to be created once per Credential Key Pair key ID and private key.
|
||||
//
|
||||
// More information about signed Cookies and their structure can be found at:
|
||||
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html
|
||||
//
|
||||
// To sign a Cookie, create a CookieSigner with your private key and credential
|
||||
// pair key ID. Once you have a CookieSigner instance you can call Sign or
|
||||
// SignWithPolicy to sign the URLs.
|
||||
//
|
||||
// The signer is safe to use concurrently, but the optional cookies options
|
||||
// are not safe to modify concurrently.
|
||||
type CookieSigner struct {
|
||||
keyID string
|
||||
privKey *rsa.PrivateKey
|
||||
|
||||
Opts CookieOptions
|
||||
}
|
||||
|
||||
// NewCookieSigner constructs and returns a new CookieSigner to be used for
|
||||
// signing Amazon CloudFront URL resources with.
|
||||
func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*CookieOptions)) *CookieSigner {
|
||||
signer := &CookieSigner{
|
||||
keyID: keyID,
|
||||
privKey: privKey,
|
||||
Opts: CookieOptions{}.apply(opts...),
|
||||
}
|
||||
|
||||
return signer
|
||||
}
|
||||
|
||||
// Sign returns the cookies needed to allow user agents to make arbitrary
|
||||
// requests to CloudFront for the resource(s) defined by the policy.
|
||||
//
|
||||
// Sign will create a CloudFront policy with only a resource and condition of
|
||||
// DateLessThan equal to the expires time provided.
|
||||
//
|
||||
// The returned slice cookies should all be added to the Client's cookies or
|
||||
// server's response.
|
||||
//
|
||||
// Example:
|
||||
// s := NewCookieSigner(keyID, privKey)
|
||||
//
|
||||
// // Get Signed cookies for a resource that will expire in 1 hour
|
||||
// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour))
|
||||
// if err != nil {
|
||||
// fmt.Println("failed to create signed cookies", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// // Or get Signed cookies for a resource that will expire in 1 hour
|
||||
// // and set path and domain of cookies
|
||||
// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour), func(o *sign.CookieOptions) {
|
||||
// o.Path = "/"
|
||||
// o.Domain = ".example.com"
|
||||
// })
|
||||
// if err != nil {
|
||||
// fmt.Println("failed to create signed cookies", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// // Server Response via http.ResponseWriter
|
||||
// for _, c := range cookies {
|
||||
// http.SetCookie(w, c)
|
||||
// }
|
||||
//
|
||||
// // Client request via the cookie jar
|
||||
// if client.CookieJar != nil {
|
||||
// for _, c := range cookies {
|
||||
// client.Cookie(w, c)
|
||||
// }
|
||||
// }
|
||||
func (s CookieSigner) Sign(u string, expires time.Time, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
|
||||
scheme, err := cookieURLScheme(u)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resource, err := CreateResource(scheme, u)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := NewCannedPolicy(resource, expires)
|
||||
return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
|
||||
}
|
||||
|
||||
// Returns and validates the URL's scheme.
|
||||
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html#private-content-custom-policy-statement-cookies
|
||||
func cookieURLScheme(u string) (string, error) {
|
||||
parts := strings.SplitN(u, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", fmt.Errorf("invalid cookie URL, missing scheme")
|
||||
}
|
||||
|
||||
scheme := strings.ToLower(parts[0])
|
||||
if scheme != "http" && scheme != "https" && scheme != "http*" {
|
||||
return "", fmt.Errorf("invalid cookie URL scheme. Expect http, https, or http*. Go, %s", scheme)
|
||||
}
|
||||
|
||||
return scheme, nil
|
||||
}
|
||||
|
||||
// SignWithPolicy returns the cookies needed to allow user agents to make
// arbitrary requests to CloudFront for the resource(s) defined by the policy.
|
||||
//
|
||||
// The returned slice of cookies should all be added to the Client's cookie
// jar or the server's response.
|
||||
//
|
||||
// Example:
|
||||
// s := NewCookieSigner(keyID, privKey)
|
||||
//
|
||||
// policy := &sign.Policy{
|
||||
// Statements: []sign.Statement{
|
||||
// {
|
||||
// // Read the provided documentation on how to set this
|
||||
// // correctly, you'll probably want to use wildcards.
|
||||
// Resource: RawCloudFrontURL,
|
||||
// Condition: sign.Condition{
|
||||
// // Optional IP source address range
|
||||
// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
|
||||
// // Optional date URL is not valid until
|
||||
// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
|
||||
// // Required date the URL will expire after
|
||||
// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// // Get Signed cookies for a resource that will expire in 1 hour
|
||||
// cookies, err := s.SignWithPolicy(policy)
|
||||
// if err != nil {
|
||||
// fmt.Println("failed to create signed cookies", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// // Or get Signed cookies for a resource that will expire in 1 hour
|
||||
// // and set path and domain of cookies
|
||||
// cookies, err := s.SignWithPolicy(policy, func(o *sign.CookieOptions) {
|
||||
// o.Path = "/"
|
||||
// o.Domain = ".example.com"
|
||||
// })
|
||||
// if err != nil {
|
||||
// fmt.Println("failed to create signed cookies", err)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// // Server Response via http.ResponseWriter
|
||||
// for _, c := range cookies {
|
||||
// http.SetCookie(w, c)
|
||||
// }
|
||||
//
|
||||
// // Client request via the cookie jar
|
||||
// if client.CookieJar != nil {
|
||||
// for _, c := range cookies {
|
||||
// client.Cookie(w, c)
|
||||
// }
|
||||
// }
|
||||
func (s CookieSigner) SignWithPolicy(p *Policy, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
|
||||
return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
|
||||
}
|
||||
|
||||
// Prepares the cookies to be attached to the header. An (optional) options
|
||||
// struct is provided in case people don't want to manually edit their cookies.
|
||||
func createCookies(p *Policy, keyID string, privKey *rsa.PrivateKey, opt CookieOptions) ([]*http.Cookie, error) {
|
||||
b64Sig, b64Policy, err := p.Sign(privKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Creates proper cookies
|
||||
cPolicy := &http.Cookie{
|
||||
Name: CookiePolicyName,
|
||||
Value: string(b64Policy),
|
||||
HttpOnly: true,
|
||||
}
|
||||
cSignature := &http.Cookie{
|
||||
Name: CookieSignatureName,
|
||||
Value: string(b64Sig),
|
||||
HttpOnly: true,
|
||||
}
|
||||
cKey := &http.Cookie{
|
||||
Name: CookieKeyIDName,
|
||||
Value: keyID,
|
||||
HttpOnly: true,
|
||||
}
|
||||
|
||||
cookies := []*http.Cookie{cPolicy, cSignature, cKey}
|
||||
|
||||
// Apply the cookie options
|
||||
for _, c := range cookies {
|
||||
c.Path = opt.Path
|
||||
c.Domain = opt.Domain
|
||||
c.Secure = opt.Secure
|
||||
}
|
||||
|
||||
return cookies, nil
|
||||
}
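As a usage illustration, here is a minimal, hedged sketch of how the CookieSigner defined above might be wired into an HTTP handler. The key-loading helper, key pair ID, and CloudFront URL are placeholders, not part of the SDK code in this file; only `sign.NewCookieSigner`, `Sign`, `sign.CookieOptions`, and `http.SetCookie` come from the package above and the standard library.

```go
package example

import (
	"crypto/rsa"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

// loadPrivateKey is a hypothetical helper; how the *rsa.PrivateKey is obtained
// (PEM file, secret store, ...) is left to the caller.
func loadPrivateKey() (*rsa.PrivateKey, error) {
	// ...
	return nil, nil
}

func setSignedCookies(w http.ResponseWriter, r *http.Request) {
	privKey, err := loadPrivateKey()
	if err != nil {
		http.Error(w, "could not load signing key", http.StatusInternalServerError)
		return
	}

	// Options passed here become the signer's defaults; per-call options can
	// still be supplied to Sign or SignWithPolicy.
	signer := sign.NewCookieSigner("K2EXAMPLEKEYPAIRID", privKey, func(o *sign.CookieOptions) {
		o.Path = "/"
		o.Domain = ".example.com"
		o.Secure = true
	})

	// Allow access to everything under the distribution for one hour.
	cookies, err := signer.Sign("http*://d111111abcdef8.cloudfront.net/*", time.Now().Add(1*time.Hour))
	if err != nil {
		http.Error(w, "could not create signed cookies", http.StatusInternalServerError)
		return
	}
	for _, c := range cookies {
		http.SetCookie(w, c)
	}
}
```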
|
16
vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
generated
vendored
16
vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
generated
vendored
|
@ -90,19 +90,19 @@ func (s URLSigner) Sign(url string, expires time.Time) (string, error) {
|
|||
// // Sign URL to be valid for 30 minutes from now, expires one hour from now, and
|
||||
// // restricted to the 192.0.2.0/24 IP address range.
|
||||
// policy := &sign.Policy{
|
||||
// Statements: []Statement{
|
||||
// Statements: []sign.Statement{
|
||||
// {
|
||||
// Resource: rawURL,
|
||||
// Condition: Condition{
|
||||
// Condition: sign.Condition{
|
||||
// // Optional IP source address range
|
||||
// IPAddress: &IPAddress{SourceIP: "192.0.2.0/24"},
|
||||
// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
|
||||
// // Optional date URL is not valid until
|
||||
// DateGreaterThan: &AWSEpochTime{time.Now().Add(30 * time.Minute)},
|
||||
// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
|
||||
// // Required date the URL will expire after
|
||||
// DateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)},
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// signer := sign.NewURLSigner(keyID, privKey)
|
||||
|
|
3352
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
3352
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
File diff suppressed because it is too large
Load diff
57
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
generated
vendored
57
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
generated
vendored
|
@ -6,32 +6,41 @@ import (
|
|||
)
|
||||
|
||||
func init() {
|
||||
initClient = func(c *client.Client) {
|
||||
// Support building custom host-style bucket endpoints
|
||||
c.Handlers.Build.PushFront(updateHostWithBucket)
|
||||
initClient = defaultInitClientFn
|
||||
initRequest = defaultInitRequestFn
|
||||
}
|
||||
|
||||
// Require SSL when using SSE keys
|
||||
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
|
||||
c.Handlers.Build.PushBack(computeSSEKeys)
|
||||
func defaultInitClientFn(c *client.Client) {
|
||||
// Support building custom endpoints based on config
|
||||
c.Handlers.Build.PushFront(updateEndpointForS3Config)
|
||||
|
||||
// S3 uses custom error unmarshaling logic
|
||||
c.Handlers.UnmarshalError.Clear()
|
||||
c.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
}
|
||||
// Require SSL when using SSE keys
|
||||
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
|
||||
c.Handlers.Build.PushBack(computeSSEKeys)
|
||||
|
||||
initRequest = func(r *request.Request) {
|
||||
switch r.Operation.Name {
|
||||
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
|
||||
// These S3 operations require Content-MD5 to be set
|
||||
r.Handlers.Build.PushBack(contentMD5)
|
||||
case opGetBucketLocation:
|
||||
// GetBucketLocation has custom parsing logic
|
||||
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
|
||||
case opCreateBucket:
|
||||
// Auto-populate LocationConstraint with current region
|
||||
r.Handlers.Validate.PushFront(populateLocationConstraint)
|
||||
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
|
||||
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
|
||||
}
|
||||
// S3 uses custom error unmarshaling logic
|
||||
c.Handlers.UnmarshalError.Clear()
|
||||
c.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
}
|
||||
|
||||
func defaultInitRequestFn(r *request.Request) {
|
||||
// Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
|
||||
platformRequestHandlers(r)
|
||||
|
||||
switch r.Operation.Name {
|
||||
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
|
||||
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
|
||||
opPutBucketReplication:
|
||||
// These S3 operations require Content-MD5 to be set
|
||||
r.Handlers.Build.PushBack(contentMD5)
|
||||
case opGetBucketLocation:
|
||||
// GetBucketLocation has custom parsing logic
|
||||
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
|
||||
case opCreateBucket:
|
||||
// Auto-populate LocationConstraint with current region
|
||||
r.Handlers.Validate.PushFront(populateLocationConstraint)
|
||||
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
|
||||
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
|
||||
}
|
||||
}
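For orientation, a minimal sketch (region and credentials are assumed to be configured elsewhere) showing that these defaults are attached automatically when the client is constructed, so callers never register the handlers themselves:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// defaultInitClientFn/defaultInitRequestFn above run inside s3.New, so the
	// endpoint, Content-MD5, and error-unmarshaling handlers are already wired up.
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// e.g. a PutBucketPolicy or DeleteObjects request built from this client
	// gets Content-MD5 added by the switch in defaultInitRequestFn.
	_ = svc
}
```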
|
||||
|
|
165
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
generated
vendored
165
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
generated
vendored
|
@ -1,14 +1,124 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// An operationBlacklist is a list of operation names that a request handler
// should not be executed with.
|
||||
type operationBlacklist []string
|
||||
|
||||
// Continue will return true if the Request's operation name is not
// in the blacklist. False otherwise.
|
||||
func (b operationBlacklist) Continue(r *request.Request) bool {
|
||||
for i := 0; i < len(b); i++ {
|
||||
if b[i] == r.Operation.Name {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var accelerateOpBlacklist = operationBlacklist{
|
||||
opListBuckets, opCreateBucket, opDeleteBucket,
|
||||
}
|
||||
|
||||
// Request handler to automatically add the bucket name to the endpoint domain
|
||||
// if possible. This style of bucket is valid for all bucket names which are
|
||||
// DNS compatible and do not contain "."
|
||||
func updateEndpointForS3Config(r *request.Request) {
|
||||
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
|
||||
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
|
||||
|
||||
if accelerate && accelerateOpBlacklist.Continue(r) {
|
||||
if forceHostStyle {
|
||||
if r.Config.Logger != nil {
|
||||
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
|
||||
}
|
||||
}
|
||||
updateEndpointForAccelerate(r)
|
||||
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
|
||||
updateEndpointForHostStyle(r)
|
||||
}
|
||||
}
|
||||
|
||||
func updateEndpointForHostStyle(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
// Ignore operation requests if the bucket name was not provided.
// If this is an input validation error the validation handler
// will report it.
|
||||
return
|
||||
}
|
||||
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
|
||||
// bucket name must be valid to put into the host
|
||||
return
|
||||
}
|
||||
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucket)
|
||||
}
|
||||
|
||||
func updateEndpointForAccelerate(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
// Ignore operation requests if the bucket name was not provided.
// If this is an input validation error the validation handler
// will report it.
|
||||
return
|
||||
}
|
||||
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
|
||||
r.Error = awserr.New("InvalidParameterException",
|
||||
fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
|
||||
nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com
|
||||
r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucket)
|
||||
}
|
||||
|
||||
// Attempts to retrieve the bucket name from the request input parameters.
|
||||
// If no bucket is found, or the field is empty "", false will be returned.
|
||||
func bucketNameFromReqParams(params interface{}) (string, bool) {
|
||||
b, _ := awsutil.ValuesAtPath(params, "Bucket")
|
||||
if len(b) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
if bucket, ok := b[0].(*string); ok {
|
||||
if bucketStr := aws.StringValue(bucket); bucketStr != "" {
|
||||
return bucketStr, true
|
||||
}
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
// hostCompatibleBucketName returns true if the request should
|
||||
// put the bucket in the host. This is false if S3ForcePathStyle is
|
||||
// explicitly set or if the bucket is not DNS compatible.
|
||||
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
|
||||
// Bucket might be DNS compatible but dots in the hostname will fail
|
||||
// certificate validation, so do not use host-style.
|
||||
if u.Scheme == "https" && strings.Contains(bucket, ".") {
|
||||
return false
|
||||
}
|
||||
|
||||
// if the bucket is DNS compatible
|
||||
return dnsCompatibleBucketName(bucket)
|
||||
}
|
||||
|
||||
var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
|
||||
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
|
||||
|
||||
|
@ -20,41 +130,36 @@ func dnsCompatibleBucketName(bucket string) bool {
|
|||
!strings.Contains(bucket, "..")
|
||||
}
|
||||
|
||||
// hostStyleBucketName returns true if the request should put the bucket in
|
||||
// the host. This is false if S3ForcePathStyle is explicitly set or if the
|
||||
// bucket is not DNS compatible.
|
||||
func hostStyleBucketName(r *request.Request, bucket string) bool {
|
||||
if aws.BoolValue(r.Config.S3ForcePathStyle) {
|
||||
return false
|
||||
// moveBucketToHost moves the bucket name from the URI path to URL host.
|
||||
func moveBucketToHost(u *url.URL, bucket string) {
|
||||
u.Host = bucket + "." + u.Host
|
||||
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
|
||||
if u.Path == "" {
|
||||
u.Path = "/"
|
||||
}
|
||||
|
||||
// Bucket might be DNS compatible but dots in the hostname will fail
|
||||
// certificate validation, so do not use host-style.
|
||||
if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBucketLocation should be able to be called from any region within
|
||||
// a partition, and return the associated region of the bucket.
|
||||
if r.Operation.Name == opGetBucketLocation {
|
||||
return false
|
||||
}
|
||||
|
||||
// Use host-style if the bucket is DNS compatible
|
||||
return dnsCompatibleBucketName(bucket)
|
||||
}
|
||||
|
||||
func updateHostWithBucket(r *request.Request) {
|
||||
b, _ := awsutil.ValuesAtPath(r.Params, "Bucket")
|
||||
if len(b) == 0 {
|
||||
return
|
||||
const s3HostPrefix = "s3"
|
||||
|
||||
// replaceHostRegion replaces the S3 region string in the host with the
|
||||
// value provided. If v is empty the host prefix returned will be s3.
|
||||
func replaceHostRegion(host, v string) string {
|
||||
if !strings.HasPrefix(host, s3HostPrefix) {
|
||||
return host
|
||||
}
|
||||
|
||||
if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) {
|
||||
r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host
|
||||
r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1)
|
||||
if r.HTTPRequest.URL.Path == "" {
|
||||
r.HTTPRequest.URL.Path = "/"
|
||||
suffix := host[len(s3HostPrefix):]
|
||||
for i := len(s3HostPrefix); i < len(host); i++ {
|
||||
if host[i] == '.' {
|
||||
// Trim until '.', leaving it in place.
|
||||
suffix = host[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
return fmt.Sprintf("s3%s", suffix)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("s3-%s%s", v, suffix)
|
||||
}
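The behavior above is driven entirely by two aws.Config flags. A small sketch, reusing the imports from the previous sketch (region and credentials are again assumed elsewhere):

```go
// Keep the bucket in the URL path instead of the host:
pathStyle := s3.New(session.New(&aws.Config{
	S3ForcePathStyle: aws.Bool(true),
}))

// Route requests through the s3-accelerate endpoint; updateEndpointForAccelerate
// above rewrites the host and rejects buckets with incompatible names:
accelerated := s3.New(session.New(&aws.Config{
	S3UseAccelerate: aws.Bool(true),
}))

_, _ = pathStyle, accelerated
```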
|
||||
|
|
8
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
generated
vendored
Normal file
8
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
// +build !go1.6
|
||||
|
||||
package s3
|
||||
|
||||
import "github.com/aws/aws-sdk-go/aws/request"
|
||||
|
||||
func platformRequestHandlers(r *request.Request) {
|
||||
}
|
28
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
generated
vendored
Normal file
28
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
|||
// +build go1.6
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
func platformRequestHandlers(r *request.Request) {
|
||||
if r.Operation.HTTPMethod == "PUT" {
|
||||
// 100-Continue should only be used on put requests.
|
||||
r.Handlers.Sign.PushBack(add100Continue)
|
||||
}
|
||||
}
|
||||
|
||||
func add100Continue(r *request.Request) {
|
||||
if aws.BoolValue(r.Config.S3Disable100Continue) {
|
||||
return
|
||||
}
|
||||
if r.HTTPRequest.ContentLength < 1024*1024*2 {
|
||||
// Ignore requests smaller than 2MB. This helps prevent delaying
|
||||
// requests unnecessarily.
|
||||
return
|
||||
}
|
||||
|
||||
r.HTTPRequest.Header.Set("Expect", "100-Continue")
|
||||
}
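If the delay from 100-continue is undesirable even for large uploads, it can be switched off through configuration rather than code. A one-line sketch, with the same imports as the earlier sketches:

```go
// Never send Expect: 100-Continue, regardless of request size.
svc := s3.New(session.New(&aws.Config{S3Disable100Continue: aws.Bool(true)}))
_ = svc
```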
|
4
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
|
@ -7,8 +7,8 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/restxml"
|
||||
"github.com/aws/aws-sdk-go/private/signer/v4"
|
||||
)
|
||||
|
||||
// S3 is a client for Amazon S3.
|
||||
|
@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
|
|||
}
|
||||
|
||||
// Handlers
|
||||
svc.Handlers.Sign.PushBack(v4.Sign)
|
||||
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
|
||||
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
|
||||
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
|
||||
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
|
@ -20,6 +21,7 @@ type xmlErrorResponse struct {
|
|||
|
||||
func unmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
|
||||
|
||||
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
|
|
6
vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
generated
vendored
|
@ -18,6 +18,12 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
|
|||
Argument: "",
|
||||
Expected: 200,
|
||||
},
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
Argument: "",
|
||||
Expected: 301,
|
||||
},
|
||||
{
|
||||
State: "success",
|
||||
Matcher: "status",
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore
generated
vendored
Normal file
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
testdata/conf_out.ini
|
||||
ini.sublime-project
|
||||
ini.sublime-workspace
|
||||
testdata/conf_reflect.ini
|
191
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
generated
vendored
Normal file
191
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
560
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
560
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md
generated
vendored
Normal file
|
@ -0,0 +1,560 @@
|
|||
ini [](https://drone.io/github.com/go-ini/ini/latest) [](http://gocover.io/github.com/go-ini/ini)
|
||||
===
|
||||
|
||||

|
||||
|
||||
Package ini provides INI file read and write functionality in Go.
|
||||
|
||||
[简体中文](README_ZH.md)
|
||||
|
||||
## Feature
|
||||
|
||||
- Load multiple data sources (`[]byte` or file) with overwrites.
- Read with recursive values.
|
||||
- Read with parent-child sections.
|
||||
- Read with auto-increment key names.
|
||||
- Read with multiple-line values.
|
||||
- Read with tons of helper methods.
|
||||
- Read and convert values to Go types.
|
||||
- Read and **WRITE** comments of sections and keys.
|
||||
- Manipulate sections, keys and comments with ease.
|
||||
- Keep sections and keys in order as you parse and save.
|
||||
|
||||
## Installation
|
||||
|
||||
go get gopkg.in/ini.v1
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Loading from data sources
|
||||
|
||||
A **Data Source** is either raw data in type `[]byte` or a file name with type `string`, and you can load **as many** data sources as you want. Passing other types will simply return an error.
|
||||
|
||||
```go
|
||||
cfg, err := ini.Load([]byte("raw data"), "filename")
|
||||
```
|
||||
|
||||
Or start with an empty object:
|
||||
|
||||
```go
|
||||
cfg := ini.Empty()
|
||||
```
|
||||
|
||||
When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
|
||||
|
||||
```go
|
||||
err := cfg.Append("other file", []byte("other raw data"))
|
||||
```
|
||||
|
||||
### Working with sections
|
||||
|
||||
To get a section, you would need to:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("section name")
|
||||
```
|
||||
|
||||
For a shortcut to the default section, just give an empty string as the name:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("")
|
||||
```
|
||||
|
||||
When you're pretty sure the section exists, the following code could make your life easier:
|
||||
|
||||
```go
|
||||
section := cfg.Section("")
|
||||
```
|
||||
|
||||
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
|
||||
|
||||
To create a new section:
|
||||
|
||||
```go
|
||||
err := cfg.NewSection("new section")
|
||||
```
|
||||
|
||||
To get a list of sections or section names:
|
||||
|
||||
```go
|
||||
sections := cfg.Sections()
|
||||
names := cfg.SectionStrings()
|
||||
```
|
||||
|
||||
### Working with keys
|
||||
|
||||
To get a key under a section:
|
||||
|
||||
```go
|
||||
key, err := cfg.Section("").GetKey("key name")
|
||||
```
|
||||
|
||||
Same rule applies to key operations:
|
||||
|
||||
```go
|
||||
key := cfg.Section("").Key("key name")
|
||||
```
|
||||
|
||||
To create a new key:
|
||||
|
||||
```go
|
||||
err := cfg.Section("").NewKey("name", "value")
|
||||
```
|
||||
|
||||
To get a list of keys or key names:
|
||||
|
||||
```go
|
||||
keys := cfg.Section("").Keys()
|
||||
names := cfg.Section("").KeyStrings()
|
||||
```
|
||||
|
||||
To get a clone hash of keys and corresponding values:
|
||||
|
||||
```go
|
||||
hash := cfg.GetSection("").KeysHash()
|
||||
```
|
||||
|
||||
### Working with values
|
||||
|
||||
To get a string value:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").String()
|
||||
```
|
||||
|
||||
To validate key value on the fly:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").Validate(func(in string) string {
|
||||
if len(in) == 0 {
|
||||
return "default"
|
||||
}
|
||||
return in
|
||||
})
|
||||
```
|
||||
|
||||
To get value with types:
|
||||
|
||||
```go
|
||||
// For boolean values:
|
||||
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
|
||||
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
|
||||
v, err = cfg.Section("").Key("BOOL").Bool()
|
||||
v, err = cfg.Section("").Key("FLOAT64").Float64()
|
||||
v, err = cfg.Section("").Key("INT").Int()
|
||||
v, err = cfg.Section("").Key("INT64").Int64()
|
||||
v, err = cfg.Section("").Key("UINT").Uint()
|
||||
v, err = cfg.Section("").Key("UINT64").Uint64()
|
||||
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
|
||||
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
|
||||
|
||||
v = cfg.Section("").Key("BOOL").MustBool()
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64()
|
||||
v = cfg.Section("").Key("INT").MustInt()
|
||||
v = cfg.Section("").Key("INT64").MustInt64()
|
||||
v = cfg.Section("").Key("UINT").MustUint()
|
||||
v = cfg.Section("").Key("UINT64").MustUint64()
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
|
||||
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
|
||||
|
||||
// Methods starting with Must also accept one argument as the default value,
// used when the key is not found or the value fails to parse to the given type.
// The exception is MustString, which must always be given a default value.
|
||||
|
||||
v = cfg.Section("").Key("String").MustString("default")
|
||||
v = cfg.Section("").Key("BOOL").MustBool(true)
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
|
||||
v = cfg.Section("").Key("INT").MustInt(10)
|
||||
v = cfg.Section("").Key("INT64").MustInt64(99)
|
||||
v = cfg.Section("").Key("UINT").MustUint(3)
|
||||
v = cfg.Section("").Key("UINT64").MustUint64(6)
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
|
||||
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
|
||||
```
|
||||
|
||||
What if my value is three lines long?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
ADDRESS = """404 road,
|
||||
NotFound, State, 5000
|
||||
Earth"""
|
||||
```
|
||||
|
||||
Not a problem!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("ADDRESS").String()
|
||||
|
||||
/* --- start ---
|
||||
404 road,
|
||||
NotFound, State, 5000
|
||||
Earth
|
||||
------ end --- */
|
||||
```
|
||||
|
||||
That's cool, how about continuation lines?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
two_lines = how about \
|
||||
continuation lines?
|
||||
lots_of_lines = 1 \
|
||||
2 \
|
||||
3 \
|
||||
4
|
||||
```
|
||||
|
||||
Piece of cake!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
|
||||
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
|
||||
```
|
||||
|
||||
Note that single or double quotes around values will be stripped:
|
||||
|
||||
```ini
|
||||
foo = "some value" // foo: some value
|
||||
bar = 'some value' // bar: some value
|
||||
```
|
||||
|
||||
That's all? Hmm, no.
|
||||
|
||||
#### Helper methods of working with values
|
||||
|
||||
To get value with given candidates:
|
||||
|
||||
```go
|
||||
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
|
||||
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
|
||||
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
|
||||
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
|
||||
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
|
||||
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
|
||||
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
|
||||
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
|
||||
```
|
||||
|
||||
The default value is returned if the value of the key is not one of the candidates you gave, and the default value does not need to be one of the candidates.
|
||||
|
||||
To validate value in a given range:
|
||||
|
||||
```go
|
||||
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
|
||||
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
|
||||
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
|
||||
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
|
||||
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
|
||||
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
|
||||
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
|
||||
```
|
||||
|
||||
To auto-split value into slice:
|
||||
|
||||
```go
|
||||
vals = cfg.Section("").Key("STRINGS").Strings(",")
|
||||
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
|
||||
vals = cfg.Section("").Key("INTS").Ints(",")
|
||||
vals = cfg.Section("").Key("INT64S").Int64s(",")
|
||||
vals = cfg.Section("").Key("UINTS").Uints(",")
|
||||
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
|
||||
vals = cfg.Section("").Key("TIMES").Times(",")
|
||||
```
|
||||
|
||||
### Save your configuration
|
||||
|
||||
Finally, it's time to save your configuration somewhere.
|
||||
|
||||
A typical way to save configuration is writing it to a file:
|
||||
|
||||
```go
|
||||
// ...
|
||||
err = cfg.SaveTo("my.ini")
|
||||
err = cfg.SaveToIndent("my.ini", "\t")
|
||||
```
|
||||
|
||||
Another way to save is writing to an `io.Writer`:
|
||||
|
||||
```go
|
||||
// ...
|
||||
cfg.WriteTo(writer)
|
||||
cfg.WriteToIndent(writer, "\t")
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Recursive Values
|
||||
|
||||
For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (or an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
|
||||
[author]
|
||||
NAME = Unknwon
|
||||
GITHUB = https://github.com/%(NAME)s
|
||||
|
||||
[package]
|
||||
FULL_NAME = github.com/go-ini/%(NAME)s
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
|
||||
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
|
||||
```
|
||||
|
||||
### Parent-child Sections
|
||||
|
||||
You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try again in its parent section, and so on until there is no parent section.
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
VERSION = v1
|
||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
||||
|
||||
[package]
|
||||
CLONE_URL = https://%(IMPORT_PATH)s
|
||||
|
||||
[package.sub]
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
|
||||
```
|
||||
|
||||
### Auto-increment Key Names
|
||||
|
||||
If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names, starting from 1, and every section keeps its own independent counter.
|
||||
|
||||
```ini
|
||||
[features]
|
||||
-: Support read/write comments of keys and sections
|
||||
-: Support auto-increment of key names
|
||||
-: Support load multiple files to overwrite key values
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
|
||||
```
|
||||
|
||||
### Map To Struct
|
||||
|
||||
Want a more object-oriented way to play with INI? Cool.
|
||||
|
||||
```ini
|
||||
Name = Unknwon
|
||||
age = 21
|
||||
Male = true
|
||||
Born = 1993-01-01T20:17:05Z
|
||||
|
||||
[Note]
|
||||
Content = Hi is a good man!
|
||||
Cities = HangZhou, Boston
|
||||
```
|
||||
|
||||
```go
|
||||
type Note struct {
|
||||
Content string
|
||||
Cities []string
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int `ini:"age"`
|
||||
Male bool
|
||||
Born time.Time
|
||||
Note
|
||||
Created time.Time `ini:"-"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
cfg, err := ini.Load("path/to/ini")
|
||||
// ...
|
||||
p := new(Person)
|
||||
err = cfg.MapTo(p)
|
||||
// ...
|
||||
|
||||
// Things can be simpler.
|
||||
err = ini.MapTo(p, "path/to/ini")
|
||||
// ...
|
||||
|
||||
// Just map a section? Fine.
|
||||
n := new(Note)
|
||||
err = cfg.Section("Note").MapTo(n)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Can I have a default value for a field? Absolutely.
|
||||
|
||||
Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
|
||||
|
||||
```go
|
||||
// ...
|
||||
p := &Person{
|
||||
Name: "Joe",
|
||||
}
|
||||
// ...
|
||||
```
|
||||
|
||||
It's really cool, but what's the point if you can't give me my file back from struct?
|
||||
|
||||
### Reflect From Struct
|
||||
|
||||
Why not?
|
||||
|
||||
```go
|
||||
type Embeded struct {
|
||||
Dates []time.Time `delim:"|"`
|
||||
Places []string
|
||||
None []int
|
||||
}
|
||||
|
||||
type Author struct {
|
||||
Name string `ini:"NAME"`
|
||||
Male bool
|
||||
Age int
|
||||
GPA float64
|
||||
NeverMind string `ini:"-"`
|
||||
*Embeded
|
||||
}
|
||||
|
||||
func main() {
|
||||
a := &Author{"Unknwon", true, 21, 2.8, "",
|
||||
&Embeded{
|
||||
[]time.Time{time.Now(), time.Now()},
|
||||
[]string{"HangZhou", "Boston"},
|
||||
[]int{},
|
||||
}}
|
||||
cfg := ini.Empty()
|
||||
err = ini.ReflectFrom(cfg, a)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
So, what do I get?
|
||||
|
||||
```ini
|
||||
NAME = Unknwon
|
||||
Male = true
|
||||
Age = 21
|
||||
GPA = 2.8
|
||||
|
||||
[Embeded]
|
||||
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
|
||||
Places = HangZhou,Boston
|
||||
None =
|
||||
```
|
||||
|
||||
#### Name Mapper
|
||||
|
||||
To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
|
||||
|
||||
There are 2 built-in name mappers:
|
||||
|
||||
- `AllCapsUnderscore`: converts the field name to the format `ALL_CAPS_UNDERSCORE` and then matches the section or key.
- `TitleUnderscore`: converts the field name to the format `title_underscore` and then matches the section or key.
|
||||
|
||||
To use them:
|
||||
|
||||
```go
|
||||
type Info struct {
|
||||
PackageName string
|
||||
}
|
||||
|
||||
func main() {
|
||||
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
|
||||
// ...
|
||||
|
||||
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
|
||||
// ...
|
||||
info := new(Info)
|
||||
cfg.NameMapper = ini.AllCapsUnderscore
|
||||
err = cfg.MapTo(info)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
|
||||
|
||||
#### Other Notes On Map/Reflect
|
||||
|
||||
Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
|
||||
|
||||
```go
|
||||
type Child struct {
|
||||
Age string
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Name string
|
||||
Child
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
City string
|
||||
Parent
|
||||
}
|
||||
```
|
||||
|
||||
Example configuration:
|
||||
|
||||
```ini
|
||||
City = Boston
|
||||
|
||||
[Parent]
|
||||
Name = Unknwon
|
||||
|
||||
[Child]
|
||||
Age = 21
|
||||
```
|
||||
|
||||
What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
|
||||
|
||||
```go
|
||||
type Child struct {
|
||||
Age string
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Name string
|
||||
Child `ini:"Parent"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
City string
|
||||
Parent
|
||||
}
|
||||
```
|
||||
|
||||
Example configuration:
|
||||
|
||||
```ini
|
||||
City = Boston
|
||||
|
||||
[Parent]
|
||||
Name = Unknwon
|
||||
Age = 21
|
||||
```
|
||||
|
||||
## Getting Help
|
||||
|
||||
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
|
||||
- [File An Issue](https://github.com/go-ini/ini/issues/new)
|
||||
|
||||
## FAQs
|
||||
|
||||
### What does `BlockMode` field do?
|
||||
|
||||
By default, the library lets you both read and write values, so a lock is needed to keep your data safe. If you are certain you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations roughly **50-70%** faster.
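A small sketch of the read-only pattern (the file name and key are placeholders):

```go
cfg, err := ini.Load("app.ini")
if err != nil {
	// handle error
}
cfg.BlockMode = false // read-only from here on; skips locking for faster reads
appMode := cfg.Section("").Key("app_mode").String()
```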
|
||||
|
||||
### Why another INI library?
|
||||
|
||||
Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
|
||||
|
||||
Making those changes meant breaking the API, so it was safer to keep this library in another place and start using `gopkg.in` to version the package. (PS: shorter import path)
|
||||
|
||||
## License
|
||||
|
||||
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
547
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md
generated
vendored
Normal file
547
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md
generated
vendored
Normal file
|
@ -0,0 +1,547 @@
|
|||
本包提供了 Go 语言中读写 INI 文件的功能。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 支持覆盖加载多个数据源(`[]byte` 或文件)
|
||||
- 支持递归读取键值
|
||||
- 支持读取父子分区
|
||||
- 支持读取自增键名
|
||||
- 支持读取多行的键值
|
||||
- 支持大量辅助方法
|
||||
- 支持在读取时直接转换为 Go 语言类型
|
||||
- 支持读取和 **写入** 分区和键的注释
|
||||
- 轻松操作分区、键值和注释
|
||||
- 在保存文件时分区和键值会保持原有的顺序
|
||||
|
||||
## 下载安装
|
||||
|
||||
go get gopkg.in/ini.v1
|
||||
|
||||
## 开始使用
|
||||
|
||||
### 从数据源加载
|
||||
|
||||
一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
|
||||
|
||||
```go
|
||||
cfg, err := ini.Load([]byte("raw data"), "filename")
|
||||
```
|
||||
|
||||
或者从一个空白的文件开始:
|
||||
|
||||
```go
|
||||
cfg := ini.Empty()
|
||||
```
|
||||
|
||||
当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
|
||||
|
||||
```go
|
||||
err := cfg.Append("other file", []byte("other raw data"))
|
||||
```
|
||||
|
||||
### 操作分区(Section)
|
||||
|
||||
获取指定分区:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("section name")
|
||||
```
|
||||
|
||||
如果您想要获取默认分区,则可以用空字符串代替分区名:
|
||||
|
||||
```go
|
||||
section, err := cfg.GetSection("")
|
||||
```
|
||||
|
||||
当您非常确定某个分区是存在的,可以使用以下简便方法:
|
||||
|
||||
```go
|
||||
section := cfg.Section("")
|
||||
```
|
||||
|
||||
如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
|
||||
|
||||
创建一个分区:
|
||||
|
||||
```go
|
||||
err := cfg.NewSection("new section")
|
||||
```
|
||||
|
||||
获取所有分区对象或名称:
|
||||
|
||||
```go
|
||||
sections := cfg.Sections()
|
||||
names := cfg.SectionStrings()
|
||||
```
|
||||
|
||||
### 操作键(Key)
|
||||
|
||||
获取某个分区下的键:
|
||||
|
||||
```go
|
||||
key, err := cfg.Section("").GetKey("key name")
|
||||
```
|
||||
|
||||
和分区一样,您也可以直接获取键而忽略错误处理:
|
||||
|
||||
```go
|
||||
key := cfg.Section("").Key("key name")
|
||||
```
|
||||
|
||||
创建一个新的键:
|
||||
|
||||
```go
|
||||
err := cfg.Section("").NewKey("name", "value")
|
||||
```
|
||||
|
||||
获取分区下的所有键或键名:
|
||||
|
||||
```go
|
||||
keys := cfg.Section("").Keys()
|
||||
names := cfg.Section("").KeyStrings()
|
||||
```
|
||||
|
||||
获取分区下的所有键值对的克隆:
|
||||
|
||||
```go
|
||||
hash := cfg.GetSection("").KeysHash()
|
||||
```
|
||||
|
||||
### 操作键值(Value)
|
||||
|
||||
获取一个类型为字符串(string)的值:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").String()
|
||||
```
|
||||
|
||||
获取值的同时通过自定义函数进行处理验证:
|
||||
|
||||
```go
|
||||
val := cfg.Section("").Key("key name").Validate(func(in string) string {
|
||||
if len(in) == 0 {
|
||||
return "default"
|
||||
}
|
||||
return in
|
||||
})
|
||||
```
|
||||
|
||||
获取其它类型的值:
|
||||
|
||||
```go
|
||||
// 布尔值的规则:
|
||||
// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
|
||||
// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
|
||||
v, err = cfg.Section("").Key("BOOL").Bool()
|
||||
v, err = cfg.Section("").Key("FLOAT64").Float64()
|
||||
v, err = cfg.Section("").Key("INT").Int()
|
||||
v, err = cfg.Section("").Key("INT64").Int64()
|
||||
v, err = cfg.Section("").Key("UINT").Uint()
|
||||
v, err = cfg.Section("").Key("UINT64").Uint64()
|
||||
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
|
||||
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
|
||||
|
||||
v = cfg.Section("").Key("BOOL").MustBool()
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64()
|
||||
v = cfg.Section("").Key("INT").MustInt()
|
||||
v = cfg.Section("").Key("INT64").MustInt64()
|
||||
v = cfg.Section("").Key("UINT").MustUint()
|
||||
v = cfg.Section("").Key("UINT64").MustUint64()
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
|
||||
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
|
||||
|
||||
// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
|
||||
// 当键不存在或者转换失败时,则会直接返回该默认值。
|
||||
// 但是,MustString 方法必须传递一个默认值。
|
||||
|
||||
v = cfg.Seciont("").Key("String").MustString("default")
|
||||
v = cfg.Section("").Key("BOOL").MustBool(true)
|
||||
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
|
||||
v = cfg.Section("").Key("INT").MustInt(10)
|
||||
v = cfg.Section("").Key("INT64").MustInt64(99)
|
||||
v = cfg.Section("").Key("UINT").MustUint(3)
|
||||
v = cfg.Section("").Key("UINT64").MustUint64(6)
|
||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
|
||||
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
|
||||
```
|
||||
|
||||
如果我的值有好多行怎么办?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
ADDRESS = """404 road,
|
||||
NotFound, State, 5000
|
||||
Earth"""
|
||||
```
|
||||
|
||||
嗯哼?小 case!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("ADDRESS").String()
|
||||
|
||||
/* --- start ---
|
||||
404 road,
|
||||
NotFound, State, 5000
|
||||
Earth
|
||||
------ end --- */
|
||||
```
|
||||
|
||||
赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
|
||||
|
||||
```ini
|
||||
[advance]
|
||||
two_lines = how about \
|
||||
continuation lines?
|
||||
lots_of_lines = 1 \
|
||||
2 \
|
||||
3 \
|
||||
4
|
||||
```
|
||||
|
||||
简直是小菜一碟!
|
||||
|
||||
```go
|
||||
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
|
||||
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
|
||||
```
|
||||
|
||||
需要注意的是,值两侧的单引号会被自动剔除:
|
||||
|
||||
```ini
|
||||
foo = "some value" // foo: some value
|
||||
bar = 'some value' // bar: some value
|
||||
```
|
||||
|
||||
这就是全部了?哈哈,当然不是。
|
||||
|
||||
#### 操作键值的辅助方法
|
||||
|
||||
获取键值时设定候选值:
|
||||
|
||||
```go
|
||||
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
|
||||
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
|
||||
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
|
||||
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
|
||||
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
|
||||
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
|
||||
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
|
||||
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
|
||||
```
|
||||
|
||||
如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
|
||||
|
||||
验证获取的值是否在指定范围内:
|
||||
|
||||
```go
|
||||
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
|
||||
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
|
||||
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
|
||||
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
|
||||
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
|
||||
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
|
||||
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
|
||||
```
|
||||
|
||||
自动分割键值为切片(slice):
|
||||
|
||||
```go
|
||||
vals = cfg.Section("").Key("STRINGS").Strings(",")
|
||||
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
|
||||
vals = cfg.Section("").Key("INTS").Ints(",")
|
||||
vals = cfg.Section("").Key("INT64S").Int64s(",")
|
||||
vals = cfg.Section("").Key("UINTS").Uints(",")
|
||||
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
|
||||
vals = cfg.Section("").Key("TIMES").Times(",")
|
||||
```
|
||||
|
||||
### 保存配置
|
||||
|
||||
终于到了这个时刻,是时候保存一下配置了。
|
||||
|
||||
比较原始的做法是输出配置到某个文件:
|
||||
|
||||
```go
|
||||
// ...
|
||||
err = cfg.SaveTo("my.ini")
|
||||
err = cfg.SaveToIndent("my.ini", "\t")
|
||||
```
|
||||
|
||||
另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
|
||||
|
||||
```go
|
||||
// ...
|
||||
cfg.WriteTo(writer)
|
||||
cfg.WriteToIndent(writer, "\t")
|
||||
```
|
||||
|
||||
### 高级用法
|
||||
|
||||
#### 递归读取键值
|
||||
|
||||
在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
|
||||
[author]
|
||||
NAME = Unknwon
|
||||
GITHUB = https://github.com/%(NAME)s
|
||||
|
||||
[package]
|
||||
FULL_NAME = github.com/go-ini/%(NAME)s
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
|
||||
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
|
||||
```
|
||||
|
||||
#### 读取父子分区
|
||||
|
||||
您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
|
||||
|
||||
```ini
|
||||
NAME = ini
|
||||
VERSION = v1
|
||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
||||
|
||||
[package]
|
||||
CLONE_URL = https://%(IMPORT_PATH)s
|
||||
|
||||
[package.sub]
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
|
||||
```
|
||||
|
||||
#### Auto-increment key names
|
||||
|
||||
If a key name in the data source is `-`, the key is treated as using the auto-increment naming syntax. The counter starts at 1 and is independent for each section.
|
||||
|
||||
```ini
|
||||
[features]
|
||||
-: Support read/write comments of keys and sections
|
||||
-: Support auto-increment of key names
|
||||
-: Support load multiple files to overwrite key values
|
||||
```
|
||||
|
||||
```go
|
||||
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
|
||||
```
|
||||
|
||||
### Mapping to a struct
|
||||
|
||||
Want to work with INI in a more object-oriented way? Good idea.
|
||||
|
||||
```ini
|
||||
Name = Unknwon
|
||||
age = 21
|
||||
Male = true
|
||||
Born = 1993-01-01T20:17:05Z
|
||||
|
||||
[Note]
|
||||
Content = Hi is a good man!
|
||||
Cities = HangZhou, Boston
|
||||
```
|
||||
|
||||
```go
|
||||
type Note struct {
|
||||
Content string
|
||||
Cities []string
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int `ini:"age"`
|
||||
Male bool
|
||||
Born time.Time
|
||||
Note
|
||||
Created time.Time `ini:"-"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
cfg, err := ini.Load("path/to/ini")
|
||||
// ...
|
||||
p := new(Person)
|
||||
err = cfg.MapTo(p)
|
||||
// ...
|
||||
|
||||
// It really can be this simple.
|
||||
err = ini.MapTo(p, "path/to/ini")
|
||||
// ...
|
||||
|
||||
// Hmm? Only need to map a single section?
|
||||
n := new(Note)
|
||||
err = cfg.Section("Note").MapTo(n)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
How do you give struct fields default values? Simple: assign the field a value before mapping. If the key is not found or has the wrong type, the value is left unchanged.
|
||||
|
||||
```go
|
||||
// ...
|
||||
p := &Person{
|
||||
Name: "Joe",
|
||||
}
|
||||
// ...
|
||||
```
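Putting the two steps together, a minimal sketch of the full flow (the file path is hypothetical):

```go
p := &Person{
	Name: "Joe",
}
err = ini.MapTo(p, "path/to/ini")
// If the file has no NAME key (or its value has the wrong type),
// p.Name is still "Joe" after the mapping.
```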
|
||||
|
||||
Working with INI like this is really cool! But what's the point if I can't get my original configuration file back?
|
||||
|
||||
### Reflecting from a struct
|
||||
|
||||
Did I ever say you couldn't?
|
||||
|
||||
```go
|
||||
type Embeded struct {
|
||||
Dates []time.Time `delim:"|"`
|
||||
Places []string
|
||||
None []int
|
||||
}
|
||||
|
||||
type Author struct {
|
||||
Name string `ini:"NAME"`
|
||||
Male bool
|
||||
Age int
|
||||
GPA float64
|
||||
NeverMind string `ini:"-"`
|
||||
*Embeded
|
||||
}
|
||||
|
||||
func main() {
|
||||
a := &Author{"Unknwon", true, 21, 2.8, "",
|
||||
&Embeded{
|
||||
[]time.Time{time.Now(), time.Now()},
|
||||
[]string{"HangZhou", "Boston"},
|
||||
[]int{},
|
||||
}}
|
||||
cfg := ini.Empty()
|
||||
err = ini.ReflectFrom(cfg, a)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Look at that, a miracle.
|
||||
|
||||
```ini
|
||||
NAME = Unknwon
|
||||
Male = true
|
||||
Age = 21
|
||||
GPA = 2.8
|
||||
|
||||
[Embeded]
|
||||
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
|
||||
Places = HangZhou,Boston
|
||||
None =
|
||||
```
|
||||
|
||||
#### Name Mapper
|
||||
|
||||
To save you time and simplify your code, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which are responsible for mapping struct field names to section and key names.
|
||||
|
||||
Two mappers are currently built in:
|
||||
|
||||
- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
|
||||
- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
|
||||
|
||||
Usage:
|
||||
|
||||
```go
|
||||
type Info struct{
|
||||
PackageName string
|
||||
}
|
||||
|
||||
func main() {
|
||||
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
|
||||
// ...
|
||||
|
||||
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
|
||||
// ...
|
||||
info := new(Info)
|
||||
cfg.NameMapper = ini.AllCapsUnderscore
|
||||
err = cfg.MapTo(info)
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
The same rules apply when using the function `ini.ReflectFromWithMapper`.
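For example, a minimal sketch reusing the `Info` type from above (the resulting key name is what the `AllCapsUnderscore` mapper would produce):

```go
cfg := ini.Empty()
err = ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.AllCapsUnderscore)
// ...
// The generated configuration now contains: PACKAGE_NAME = ini
```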
|
||||
|
||||
#### Other notes on mapping and reflecting
|
||||
|
||||
Any embedded struct is treated as its own section by default, and no parent-child section relationship is created automatically:
|
||||
|
||||
```go
|
||||
type Child struct {
|
||||
Age string
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Name string
|
||||
Child
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
City string
|
||||
Parent
|
||||
}
|
||||
```
|
||||
|
||||
Example configuration file:
|
||||
|
||||
```ini
|
||||
City = Boston
|
||||
|
||||
[Parent]
|
||||
Name = Unknwon
|
||||
|
||||
[Child]
|
||||
Age = 21
|
||||
```
|
||||
|
||||
Fine, but I want the embedded struct to end up in the same section. All right, have it your way!
|
||||
|
||||
```go
|
||||
type Child struct {
|
||||
Age string
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Name string
|
||||
Child `ini:"Parent"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
City string
|
||||
Parent
|
||||
}
|
||||
```
|
||||
|
||||
Example configuration file:
|
||||
|
||||
```ini
|
||||
City = Boston
|
||||
|
||||
[Parent]
|
||||
Name = Unknwon
|
||||
Age = 21
|
||||
```
|
||||
|
||||
## Getting help
|
||||
|
||||
- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
|
||||
- [File an issue](https://github.com/go-ini/ini/issues/new)
|
||||
|
||||
## FAQ
|
||||
|
||||
### What is the `BlockMode` field?
|
||||
|
||||
By default, this library uses locking during read and write operations to keep the data consistent. In some cases, however, you are absolutely sure you will only be reading. You can then set `cfg.BlockMode = false` to speed up read operations by roughly **50-70%**.
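A minimal read-only sketch (the file name and key are hypothetical):

```go
cfg, err := ini.Load("my.ini")
if err != nil {
	// ...
}
cfg.BlockMode = false // only reads from here on, so skip the locking
name := cfg.Section("").Key("NAME").String()
```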
|
||||
|
||||
### Why write another INI parsing library?
|
||||
|
||||
Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code in a more Go-idiomatic style. Also, when you set `cfg.BlockMode = false`, you get roughly a **10-30%** performance improvement.
|
||||
|
||||
Making these changes meant breaking the API, so starting a new repository was the safest choice. Besides, this library is versioned and released directly via `gopkg.in`. (The real reason is that the import path is shorter.)
|
1226
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
generated
vendored
Normal file
1226
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
generated
vendored
Normal file
File diff suppressed because it is too large
350
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
generated
vendored
Normal file
350
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
generated
vendored
Normal file
|
@ -0,0 +1,350 @@
|
|||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// NameMapper represents an ini tag name mapper.
|
||||
type NameMapper func(string) string
|
||||
|
||||
// Built-in name getters.
|
||||
var (
|
||||
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
|
||||
AllCapsUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
}
|
||||
newstr = append(newstr, unicode.ToUpper(chr))
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
// TitleUnderscore converts to format title_underscore.
|
||||
TitleUnderscore NameMapper = func(raw string) string {
|
||||
newstr := make([]rune, 0, len(raw))
|
||||
for i, chr := range raw {
|
||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
||||
if i > 0 {
|
||||
newstr = append(newstr, '_')
|
||||
}
|
||||
chr -= ('A' - 'a')
|
||||
}
|
||||
newstr = append(newstr, chr)
|
||||
}
|
||||
return string(newstr)
|
||||
}
|
||||
)
|
||||
|
||||
func (s *Section) parseFieldName(raw, actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
if s.f.NameMapper != nil {
|
||||
return s.f.NameMapper(raw)
|
||||
}
|
||||
return raw
|
||||
}
|
||||
|
||||
func parseDelim(actual string) string {
|
||||
if len(actual) > 0 {
|
||||
return actual
|
||||
}
|
||||
return ","
|
||||
}
|
||||
|
||||
var reflectTime = reflect.TypeOf(time.Now()).Kind()
|
||||
|
||||
// setWithProperType sets a proper value on the field based on its type,
|
||||
// but it does not return an error when parsing fails,
|
||||
// because we want to keep the default value that is already assigned to the struct.
|
||||
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if len(key.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
field.SetString(key.String())
|
||||
case reflect.Bool:
|
||||
boolVal, err := key.Bool()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
field.SetBool(boolVal)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
durationVal, err := key.Duration()
|
||||
if err == nil {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
intVal, err := key.Int64()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
field.SetInt(intVal)
|
||||
// byte is an alias for uint8, so supporting uint8 breaks support for byte
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
durationVal, err := key.Duration()
|
||||
if err == nil {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
|
||||
uintVal, err := key.Uint64()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
field.SetUint(uintVal)
|
||||
|
||||
case reflect.Float64:
|
||||
floatVal, err := key.Float64()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
field.SetFloat(floatVal)
|
||||
case reflectTime:
|
||||
timeVal, err := key.Time()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
field.Set(reflect.ValueOf(timeVal))
|
||||
case reflect.Slice:
|
||||
vals := key.Strings(delim)
|
||||
numVals := len(vals)
|
||||
if numVals == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sliceOf := field.Type().Elem().Kind()
|
||||
|
||||
var times []time.Time
|
||||
if sliceOf == reflectTime {
|
||||
times = key.Times(delim)
|
||||
}
|
||||
|
||||
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
|
||||
for i := 0; i < numVals; i++ {
|
||||
switch sliceOf {
|
||||
case reflectTime:
|
||||
slice.Index(i).Set(reflect.ValueOf(times[i]))
|
||||
default:
|
||||
slice.Index(i).Set(reflect.ValueOf(vals[i]))
|
||||
}
|
||||
}
|
||||
field.Set(slice)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Section) mapTo(val reflect.Value) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := s.parseFieldName(tpField.Name, tag)
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
|
||||
isStruct := tpField.Type.Kind() == reflect.Struct
|
||||
if isAnonymous {
|
||||
field.Set(reflect.New(tpField.Type.Elem()))
|
||||
}
|
||||
|
||||
if isAnonymous || isStruct {
|
||||
if sec, err := s.f.GetSection(fieldName); err == nil {
|
||||
if err = sec.mapTo(field); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if key, err := s.GetKey(fieldName); err == nil {
|
||||
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MapTo maps section to given struct.
|
||||
func (s *Section) MapTo(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot map to non-pointer struct")
|
||||
}
|
||||
|
||||
return s.mapTo(val)
|
||||
}
|
||||
|
||||
// MapTo maps file to given struct.
|
||||
func (f *File) MapTo(v interface{}) error {
|
||||
return f.Section("").MapTo(v)
|
||||
}
|
||||
|
||||
// MapToWithMapper maps data sources to the given struct with a name mapper.
|
||||
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
||||
cfg, err := Load(source, others...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.MapTo(v)
|
||||
}
|
||||
|
||||
// MapTo maps data sources to given struct.
|
||||
func MapTo(v, source interface{}, others ...interface{}) error {
|
||||
return MapToWithMapper(v, nil, source, others...)
|
||||
}
|
||||
|
||||
// reflectWithProperType does the opposite of setWithProperType.
|
||||
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
key.SetValue(field.String())
|
||||
case reflect.Bool,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Float64,
|
||||
reflectTime:
|
||||
key.SetValue(fmt.Sprint(field))
|
||||
case reflect.Slice:
|
||||
vals := field.Slice(0, field.Len())
|
||||
if field.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
isTime := fmt.Sprint(field.Type()) == "[]time.Time"
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
if isTime {
|
||||
buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprint(vals.Index(i)))
|
||||
}
|
||||
buf.WriteString(delim)
|
||||
}
|
||||
key.SetValue(buf.String()[:buf.Len()-1])
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '%s'", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Section) reflectFrom(val reflect.Value) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
tpField := typ.Field(i)
|
||||
|
||||
tag := tpField.Tag.Get("ini")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := s.parseFieldName(tpField.Name, tag)
|
||||
if len(fieldName) == 0 || !field.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
|
||||
(tpField.Type.Kind() == reflect.Struct) {
|
||||
// Note: The only possible error here is that the section doesn't exist.
|
||||
sec, err := s.f.GetSection(fieldName)
|
||||
if err != nil {
|
||||
// Note: fieldName can never be empty here, ignore error.
|
||||
sec, _ = s.f.NewSection(fieldName)
|
||||
}
|
||||
if err = sec.reflectFrom(field); err != nil {
|
||||
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Note: Same reason as the section above.
|
||||
key, err := s.GetKey(fieldName)
|
||||
if err != nil {
|
||||
key, _ = s.NewKey(fieldName, "")
|
||||
}
|
||||
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
||||
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReflectFrom reflects a section from the given struct.
|
||||
func (s *Section) ReflectFrom(v interface{}) error {
|
||||
typ := reflect.TypeOf(v)
|
||||
val := reflect.ValueOf(v)
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
val = val.Elem()
|
||||
} else {
|
||||
return errors.New("cannot reflect from non-pointer struct")
|
||||
}
|
||||
|
||||
return s.reflectFrom(val)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects file from given struct.
|
||||
func (f *File) ReflectFrom(v interface{}) error {
|
||||
return f.Section("").ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
|
||||
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
|
||||
cfg.NameMapper = mapper
|
||||
return cfg.ReflectFrom(v)
|
||||
}
|
||||
|
||||
// ReflectFrom reflects data sources from given struct.
|
||||
func ReflectFrom(cfg *File, v interface{}) error {
|
||||
return ReflectFromWithMapper(cfg, v, nil)
|
||||
}
|
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore
generated
vendored
Normal file
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
jpgo
|
||||
jmespath-fuzz.zip
|
||||
cpu.out
|
||||
go-jmespath.test
|
9
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml
generated
vendored
Normal file
9
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
|
||||
install: go get -v -t ./...
|
||||
script: make test
|
13
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
generated
vendored
Normal file
13
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
Copyright 2015 James Saryerwinnie
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
44
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile
generated
vendored
Normal file
44
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
|
||||
CMD = jpgo
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " test to run all the tests"
|
||||
@echo " build to build the library and jp executable"
|
||||
@echo " generate to run codegen"
|
||||
|
||||
|
||||
generate:
|
||||
go generate ./...
|
||||
|
||||
build:
|
||||
rm -f $(CMD)
|
||||
go build ./...
|
||||
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
|
||||
mv cmd/$(CMD)/$(CMD) .
|
||||
|
||||
test:
|
||||
go test -v ./...
|
||||
|
||||
check:
|
||||
go vet ./...
|
||||
@echo "golint ./..."
|
||||
@lint=`golint ./...`; \
|
||||
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
|
||||
echo "$$lint"; \
|
||||
if [ "$$lint" != "" ]; then exit 1; fi
|
||||
|
||||
htmlc:
|
||||
go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
|
||||
|
||||
buildfuzz:
|
||||
go-fuzz-build github.com/jmespath/go-jmespath/fuzz
|
||||
|
||||
fuzz: buildfuzz
|
||||
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
|
||||
|
||||
bench:
|
||||
go test -bench . -cpuprofile cpu.out
|
||||
|
||||
pprof-cpu:
|
||||
go tool pprof ./go-jmespath.test ./cpu.out
|
7
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
generated
vendored
Normal file
7
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
# go-jmespath - A JMESPath implementation in Go
|
||||
|
||||
[](https://travis-ci.org/jmespath/go-jmespath)
|
||||
|
||||
|
||||
|
||||
See http://jmespath.org for more info.
|
49
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
generated
vendored
Normal file
49
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
package jmespath
|
||||
|
||||
import "strconv"
|
||||
|
||||
// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
|
||||
// safe for concurrent use by multiple goroutines.
|
||||
type JMESPath struct {
|
||||
ast ASTNode
|
||||
intr *treeInterpreter
|
||||
}
|
||||
|
||||
// Compile parses a JMESPath expression and returns, if successful, a JMESPath
|
||||
// object that can be used to match against data.
|
||||
func Compile(expression string) (*JMESPath, error) {
|
||||
parser := NewParser()
|
||||
ast, err := parser.Parse(expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
|
||||
return jmespath, nil
|
||||
}
|
||||
|
||||
// MustCompile is like Compile but panics if the expression cannot be parsed.
|
||||
// It simplifies safe initialization of global variables holding compiled
|
||||
// JMESPaths.
|
||||
func MustCompile(expression string) *JMESPath {
|
||||
jmespath, err := Compile(expression)
|
||||
if err != nil {
|
||||
panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
|
||||
}
|
||||
return jmespath
|
||||
}
|
||||
|
||||
// Search evaluates a JMESPath expression against input data and returns the result.
|
||||
func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
|
||||
return jp.intr.Execute(jp.ast, data)
|
||||
}
|
||||
|
||||
// Search evaluates a JMESPath expression against input data and returns the result.
|
||||
func Search(expression string, data interface{}) (interface{}, error) {
|
||||
intr := newInterpreter()
|
||||
parser := NewParser()
|
||||
ast, err := parser.Parse(expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return intr.Execute(ast, data)
|
||||
}
|
16
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
generated
vendored
Normal file
16
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
// generated by stringer -type astNodeType; DO NOT EDIT
|
||||
|
||||
package jmespath
|
||||
|
||||
import "fmt"
|
||||
|
||||
const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
|
||||
|
||||
var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
|
||||
|
||||
func (i astNodeType) String() string {
|
||||
if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
|
||||
return fmt.Sprintf("astNodeType(%d)", i)
|
||||
}
|
||||
return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
|
||||
}
|
842
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go
generated
vendored
Normal file
842
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go
generated
vendored
Normal file
|
@ -0,0 +1,842 @@
|
|||
package jmespath
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type jpFunction func(arguments []interface{}) (interface{}, error)
|
||||
|
||||
type jpType string
|
||||
|
||||
const (
|
||||
jpUnknown jpType = "unknown"
|
||||
jpNumber jpType = "number"
|
||||
jpString jpType = "string"
|
||||
jpArray jpType = "array"
|
||||
jpObject jpType = "object"
|
||||
jpArrayNumber jpType = "array[number]"
|
||||
jpArrayString jpType = "array[string]"
|
||||
jpExpref jpType = "expref"
|
||||
jpAny jpType = "any"
|
||||
)
|
||||
|
||||
type functionEntry struct {
|
||||
name string
|
||||
arguments []argSpec
|
||||
handler jpFunction
|
||||
hasExpRef bool
|
||||
}
|
||||
|
||||
type argSpec struct {
|
||||
types []jpType
|
||||
variadic bool
|
||||
}
|
||||
|
||||
type byExprString struct {
|
||||
intr *treeInterpreter
|
||||
node ASTNode
|
||||
items []interface{}
|
||||
hasError bool
|
||||
}
|
||||
|
||||
func (a *byExprString) Len() int {
|
||||
return len(a.items)
|
||||
}
|
||||
func (a *byExprString) Swap(i, j int) {
|
||||
a.items[i], a.items[j] = a.items[j], a.items[i]
|
||||
}
|
||||
func (a *byExprString) Less(i, j int) bool {
|
||||
first, err := a.intr.Execute(a.node, a.items[i])
|
||||
if err != nil {
|
||||
a.hasError = true
|
||||
// Return a dummy value.
|
||||
return true
|
||||
}
|
||||
ith, ok := first.(string)
|
||||
if !ok {
|
||||
a.hasError = true
|
||||
return true
|
||||
}
|
||||
second, err := a.intr.Execute(a.node, a.items[j])
|
||||
if err != nil {
|
||||
a.hasError = true
|
||||
// Return a dummy value.
|
||||
return true
|
||||
}
|
||||
jth, ok := second.(string)
|
||||
if !ok {
|
||||
a.hasError = true
|
||||
return true
|
||||
}
|
||||
return ith < jth
|
||||
}
|
||||
|
||||
type byExprFloat struct {
|
||||
intr *treeInterpreter
|
||||
node ASTNode
|
||||
items []interface{}
|
||||
hasError bool
|
||||
}
|
||||
|
||||
func (a *byExprFloat) Len() int {
|
||||
return len(a.items)
|
||||
}
|
||||
func (a *byExprFloat) Swap(i, j int) {
|
||||
a.items[i], a.items[j] = a.items[j], a.items[i]
|
||||
}
|
||||
func (a *byExprFloat) Less(i, j int) bool {
|
||||
first, err := a.intr.Execute(a.node, a.items[i])
|
||||
if err != nil {
|
||||
a.hasError = true
|
||||
// Return a dummy value.
|
||||
return true
|
||||
}
|
||||
ith, ok := first.(float64)
|
||||
if !ok {
|
||||
a.hasError = true
|
||||
return true
|
||||
}
|
||||
second, err := a.intr.Execute(a.node, a.items[j])
|
||||
if err != nil {
|
||||
a.hasError = true
|
||||
// Return a dummy value.
|
||||
return true
|
||||
}
|
||||
jth, ok := second.(float64)
|
||||
if !ok {
|
||||
a.hasError = true
|
||||
return true
|
||||
}
|
||||
return ith < jth
|
||||
}
|
||||
|
||||
type functionCaller struct {
|
||||
functionTable map[string]functionEntry
|
||||
}
|
||||
|
||||
func newFunctionCaller() *functionCaller {
|
||||
caller := &functionCaller{}
|
||||
caller.functionTable = map[string]functionEntry{
|
||||
"length": {
|
||||
name: "length",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpString, jpArray, jpObject}},
|
||||
},
|
||||
handler: jpfLength,
|
||||
},
|
||||
"starts_with": {
|
||||
name: "starts_with",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpString}},
|
||||
{types: []jpType{jpString}},
|
||||
},
|
||||
handler: jpfStartsWith,
|
||||
},
|
||||
"abs": {
|
||||
name: "abs",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpNumber}},
|
||||
},
|
||||
handler: jpfAbs,
|
||||
},
|
||||
"avg": {
|
||||
name: "avg",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArrayNumber}},
|
||||
},
|
||||
handler: jpfAvg,
|
||||
},
|
||||
"ceil": {
|
||||
name: "ceil",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpNumber}},
|
||||
},
|
||||
handler: jpfCeil,
|
||||
},
|
||||
"contains": {
|
||||
name: "contains",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArray, jpString}},
|
||||
{types: []jpType{jpAny}},
|
||||
},
|
||||
handler: jpfContains,
|
||||
},
|
||||
"ends_with": {
|
||||
name: "ends_with",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpString}},
|
||||
{types: []jpType{jpString}},
|
||||
},
|
||||
handler: jpfEndsWith,
|
||||
},
|
||||
"floor": {
|
||||
name: "floor",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpNumber}},
|
||||
},
|
||||
handler: jpfFloor,
|
||||
},
|
||||
"map": {
|
||||
name: "amp",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpExpref}},
|
||||
{types: []jpType{jpArray}},
|
||||
},
|
||||
handler: jpfMap,
|
||||
hasExpRef: true,
|
||||
},
|
||||
"max": {
|
||||
name: "max",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArrayNumber, jpArrayString}},
|
||||
},
|
||||
handler: jpfMax,
|
||||
},
|
||||
"merge": {
|
||||
name: "merge",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpObject}, variadic: true},
|
||||
},
|
||||
handler: jpfMerge,
|
||||
},
|
||||
"max_by": {
|
||||
name: "max_by",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArray}},
|
||||
{types: []jpType{jpExpref}},
|
||||
},
|
||||
handler: jpfMaxBy,
|
||||
hasExpRef: true,
|
||||
},
|
||||
"sum": {
|
||||
name: "sum",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArrayNumber}},
|
||||
},
|
||||
handler: jpfSum,
|
||||
},
|
||||
"min": {
|
||||
name: "min",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArrayNumber, jpArrayString}},
|
||||
},
|
||||
handler: jpfMin,
|
||||
},
|
||||
"min_by": {
|
||||
name: "min_by",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArray}},
|
||||
{types: []jpType{jpExpref}},
|
||||
},
|
||||
handler: jpfMinBy,
|
||||
hasExpRef: true,
|
||||
},
|
||||
"type": {
|
||||
name: "type",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpAny}},
|
||||
},
|
||||
handler: jpfType,
|
||||
},
|
||||
"keys": {
|
||||
name: "keys",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpObject}},
|
||||
},
|
||||
handler: jpfKeys,
|
||||
},
|
||||
"values": {
|
||||
name: "values",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpObject}},
|
||||
},
|
||||
handler: jpfValues,
|
||||
},
|
||||
"sort": {
|
||||
name: "sort",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArrayString, jpArrayNumber}},
|
||||
},
|
||||
handler: jpfSort,
|
||||
},
|
||||
"sort_by": {
|
||||
name: "sort_by",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArray}},
|
||||
{types: []jpType{jpExpref}},
|
||||
},
|
||||
handler: jpfSortBy,
|
||||
hasExpRef: true,
|
||||
},
|
||||
"join": {
|
||||
name: "join",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpString}},
|
||||
{types: []jpType{jpArrayString}},
|
||||
},
|
||||
handler: jpfJoin,
|
||||
},
|
||||
"reverse": {
|
||||
name: "reverse",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpArray, jpString}},
|
||||
},
|
||||
handler: jpfReverse,
|
||||
},
|
||||
"to_array": {
|
||||
name: "to_array",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpAny}},
|
||||
},
|
||||
handler: jpfToArray,
|
||||
},
|
||||
"to_string": {
|
||||
name: "to_string",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpAny}},
|
||||
},
|
||||
handler: jpfToString,
|
||||
},
|
||||
"to_number": {
|
||||
name: "to_number",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpAny}},
|
||||
},
|
||||
handler: jpfToNumber,
|
||||
},
|
||||
"not_null": {
|
||||
name: "not_null",
|
||||
arguments: []argSpec{
|
||||
{types: []jpType{jpAny}, variadic: true},
|
||||
},
|
||||
handler: jpfNotNull,
|
||||
},
|
||||
}
|
||||
return caller
|
||||
}
|
||||
|
||||
func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
|
||||
if len(e.arguments) == 0 {
|
||||
return arguments, nil
|
||||
}
|
||||
if !e.arguments[len(e.arguments)-1].variadic {
|
||||
if len(e.arguments) != len(arguments) {
|
||||
return nil, errors.New("incorrect number of args")
|
||||
}
|
||||
for i, spec := range e.arguments {
|
||||
userArg := arguments[i]
|
||||
err := spec.typeCheck(userArg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return arguments, nil
|
||||
}
|
||||
if len(arguments) < len(e.arguments) {
|
||||
return nil, errors.New("Invalid arity.")
|
||||
}
|
||||
return arguments, nil
|
||||
}
|
||||
|
||||
func (a *argSpec) typeCheck(arg interface{}) error {
|
||||
for _, t := range a.types {
|
||||
switch t {
|
||||
case jpNumber:
|
||||
if _, ok := arg.(float64); ok {
|
||||
return nil
|
||||
}
|
||||
case jpString:
|
||||
if _, ok := arg.(string); ok {
|
||||
return nil
|
||||
}
|
||||
case jpArray:
|
||||
if isSliceType(arg) {
|
||||
return nil
|
||||
}
|
||||
case jpObject:
|
||||
if _, ok := arg.(map[string]interface{}); ok {
|
||||
return nil
|
||||
}
|
||||
case jpArrayNumber:
|
||||
if _, ok := toArrayNum(arg); ok {
|
||||
return nil
|
||||
}
|
||||
case jpArrayString:
|
||||
if _, ok := toArrayStr(arg); ok {
|
||||
return nil
|
||||
}
|
||||
case jpAny:
|
||||
return nil
|
||||
case jpExpref:
|
||||
if _, ok := arg.(expRef); ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
|
||||
}
|
||||
|
||||
func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
|
||||
entry, ok := f.functionTable[name]
|
||||
if !ok {
|
||||
return nil, errors.New("unknown function: " + name)
|
||||
}
|
||||
resolvedArgs, err := entry.resolveArgs(arguments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry.hasExpRef {
|
||||
var extra []interface{}
|
||||
extra = append(extra, intr)
|
||||
resolvedArgs = append(extra, resolvedArgs...)
|
||||
}
|
||||
return entry.handler(resolvedArgs)
|
||||
}
|
||||
|
||||
func jpfAbs(arguments []interface{}) (interface{}, error) {
|
||||
num := arguments[0].(float64)
|
||||
return math.Abs(num), nil
|
||||
}
|
||||
|
||||
func jpfLength(arguments []interface{}) (interface{}, error) {
|
||||
arg := arguments[0]
|
||||
if c, ok := arg.(string); ok {
|
||||
return float64(utf8.RuneCountInString(c)), nil
|
||||
} else if isSliceType(arg) {
|
||||
v := reflect.ValueOf(arg)
|
||||
return float64(v.Len()), nil
|
||||
} else if c, ok := arg.(map[string]interface{}); ok {
|
||||
return float64(len(c)), nil
|
||||
}
|
||||
return nil, errors.New("could not compute length()")
|
||||
}
|
||||
|
||||
func jpfStartsWith(arguments []interface{}) (interface{}, error) {
|
||||
search := arguments[0].(string)
|
||||
prefix := arguments[1].(string)
|
||||
return strings.HasPrefix(search, prefix), nil
|
||||
}
|
||||
|
||||
func jpfAvg(arguments []interface{}) (interface{}, error) {
|
||||
// We've already type checked the value so we can safely use
|
||||
// type assertions.
|
||||
args := arguments[0].([]interface{})
|
||||
length := float64(len(args))
|
||||
numerator := 0.0
|
||||
for _, n := range args {
|
||||
numerator += n.(float64)
|
||||
}
|
||||
return numerator / length, nil
|
||||
}
|
||||
func jpfCeil(arguments []interface{}) (interface{}, error) {
|
||||
val := arguments[0].(float64)
|
||||
return math.Ceil(val), nil
|
||||
}
|
||||
func jpfContains(arguments []interface{}) (interface{}, error) {
|
||||
search := arguments[0]
|
||||
el := arguments[1]
|
||||
if searchStr, ok := search.(string); ok {
|
||||
if elStr, ok := el.(string); ok {
|
||||
return strings.Index(searchStr, elStr) != -1, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
// Otherwise this is a generic contains for []interface{}
|
||||
general := search.([]interface{})
|
||||
for _, item := range general {
|
||||
if item == el {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
func jpfEndsWith(arguments []interface{}) (interface{}, error) {
|
||||
search := arguments[0].(string)
|
||||
suffix := arguments[1].(string)
|
||||
return strings.HasSuffix(search, suffix), nil
|
||||
}
|
||||
func jpfFloor(arguments []interface{}) (interface{}, error) {
|
||||
val := arguments[0].(float64)
|
||||
return math.Floor(val), nil
|
||||
}
|
||||
func jpfMap(arguments []interface{}) (interface{}, error) {
|
||||
intr := arguments[0].(*treeInterpreter)
|
||||
exp := arguments[1].(expRef)
|
||||
node := exp.ref
|
||||
arr := arguments[2].([]interface{})
|
||||
mapped := make([]interface{}, 0, len(arr))
|
||||
for _, value := range arr {
|
||||
current, err := intr.Execute(node, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mapped = append(mapped, current)
|
||||
}
|
||||
return mapped, nil
|
||||
}
|
||||
func jpfMax(arguments []interface{}) (interface{}, error) {
|
||||
if items, ok := toArrayNum(arguments[0]); ok {
|
||||
if len(items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(items) == 1 {
|
||||
return items[0], nil
|
||||
}
|
||||
best := items[0]
|
||||
for _, item := range items[1:] {
|
||||
if item > best {
|
||||
best = item
|
||||
}
|
||||
}
|
||||
return best, nil
|
||||
}
|
||||
// Otherwise we're dealing with a max() of strings.
|
||||
items, _ := toArrayStr(arguments[0])
|
||||
if len(items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(items) == 1 {
|
||||
return items[0], nil
|
||||
}
|
||||
best := items[0]
|
||||
for _, item := range items[1:] {
|
||||
if item > best {
|
||||
best = item
|
||||
}
|
||||
}
|
||||
return best, nil
|
||||
}
|
||||
func jpfMerge(arguments []interface{}) (interface{}, error) {
|
||||
final := make(map[string]interface{})
|
||||
for _, m := range arguments {
|
||||
mapped := m.(map[string]interface{})
|
||||
for key, value := range mapped {
|
||||
final[key] = value
|
||||
}
|
||||
}
|
||||
return final, nil
|
||||
}
|
||||
func jpfMaxBy(arguments []interface{}) (interface{}, error) {
|
||||
intr := arguments[0].(*treeInterpreter)
|
||||
arr := arguments[1].([]interface{})
|
||||
exp := arguments[2].(expRef)
|
||||
node := exp.ref
|
||||
if len(arr) == 0 {
|
||||
return nil, nil
|
||||
} else if len(arr) == 1 {
|
||||
return arr[0], nil
|
||||
}
|
||||
start, err := intr.Execute(node, arr[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch t := start.(type) {
|
||||
case float64:
|
||||
bestVal := t
|
||||
bestItem := arr[0]
|
||||
for _, item := range arr[1:] {
|
||||
result, err := intr.Execute(node, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
current, ok := result.(float64)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid type, must be number")
|
||||
}
|
||||
if current > bestVal {
|
||||
bestVal = current
|
||||
bestItem = item
|
||||
}
|
||||
}
|
||||
return bestItem, nil
|
||||
case string:
|
||||
bestVal := t
|
||||
bestItem := arr[0]
|
||||
for _, item := range arr[1:] {
|
||||
result, err := intr.Execute(node, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
current, ok := result.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid type, must be string")
|
||||
}
|
||||
if current > bestVal {
|
||||
bestVal = current
|
||||
bestItem = item
|
||||
}
|
||||
}
|
||||
return bestItem, nil
|
||||
default:
|
||||
return nil, errors.New("invalid type, must be number of string")
|
||||
}
|
||||
}
|
||||
func jpfSum(arguments []interface{}) (interface{}, error) {
|
||||
items, _ := toArrayNum(arguments[0])
|
||||
sum := 0.0
|
||||
for _, item := range items {
|
||||
sum += item
|
||||
}
|
||||
return sum, nil
|
||||
}
|
||||
|
||||
func jpfMin(arguments []interface{}) (interface{}, error) {
|
||||
if items, ok := toArrayNum(arguments[0]); ok {
|
||||
if len(items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(items) == 1 {
|
||||
return items[0], nil
|
||||
}
|
||||
best := items[0]
|
||||
for _, item := range items[1:] {
|
||||
if item < best {
|
||||
best = item
|
||||
}
|
||||
}
|
||||
return best, nil
|
||||
}
|
||||
items, _ := toArrayStr(arguments[0])
|
||||
if len(items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(items) == 1 {
|
||||
return items[0], nil
|
||||
}
|
||||
best := items[0]
|
||||
for _, item := range items[1:] {
|
||||
if item < best {
|
||||
best = item
|
||||
}
|
||||
}
|
||||
return best, nil
|
||||
}
|
||||
|
||||
func jpfMinBy(arguments []interface{}) (interface{}, error) {
|
||||
intr := arguments[0].(*treeInterpreter)
|
||||
arr := arguments[1].([]interface{})
|
||||
exp := arguments[2].(expRef)
|
||||
node := exp.ref
|
||||
if len(arr) == 0 {
|
||||
return nil, nil
|
||||
} else if len(arr) == 1 {
|
||||
return arr[0], nil
|
||||
}
|
||||
start, err := intr.Execute(node, arr[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t, ok := start.(float64); ok {
|
||||
bestVal := t
|
||||
bestItem := arr[0]
|
||||
for _, item := range arr[1:] {
|
||||
result, err := intr.Execute(node, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
current, ok := result.(float64)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid type, must be number")
|
||||
}
|
||||
if current < bestVal {
|
||||
bestVal = current
|
||||
bestItem = item
|
||||
}
|
||||
}
|
||||
return bestItem, nil
|
||||
} else if t, ok := start.(string); ok {
|
||||
bestVal := t
|
||||
bestItem := arr[0]
|
||||
for _, item := range arr[1:] {
|
||||
result, err := intr.Execute(node, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
current, ok := result.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid type, must be string")
|
||||
}
|
||||
if current < bestVal {
|
||||
bestVal = current
|
||||
bestItem = item
|
||||
}
|
||||
}
|
||||
return bestItem, nil
|
||||
} else {
|
||||
return nil, errors.New("invalid type, must be number of string")
|
||||
}
|
||||
}
|
||||
func jpfType(arguments []interface{}) (interface{}, error) {
|
||||
arg := arguments[0]
|
||||
if _, ok := arg.(float64); ok {
|
||||
return "number", nil
|
||||
}
|
||||
if _, ok := arg.(string); ok {
|
||||
return "string", nil
|
||||
}
|
||||
if _, ok := arg.([]interface{}); ok {
|
||||
return "array", nil
|
||||
}
|
||||
if _, ok := arg.(map[string]interface{}); ok {
|
||||
return "object", nil
|
||||
}
|
||||
if arg == nil {
|
||||
return "null", nil
|
||||
}
|
||||
if arg == true || arg == false {
|
||||
return "boolean", nil
|
||||
}
|
||||
return nil, errors.New("unknown type")
|
||||
}
|
||||
func jpfKeys(arguments []interface{}) (interface{}, error) {
|
||||
arg := arguments[0].(map[string]interface{})
|
||||
collected := make([]interface{}, 0, len(arg))
|
||||
for key := range arg {
|
||||
collected = append(collected, key)
|
||||
}
|
||||
return collected, nil
|
||||
}
|
||||
func jpfValues(arguments []interface{}) (interface{}, error) {
|
||||
arg := arguments[0].(map[string]interface{})
|
||||
collected := make([]interface{}, 0, len(arg))
|
||||
for _, value := range arg {
|
||||
collected = append(collected, value)
|
||||
}
|
||||
return collected, nil
|
||||
}
|
||||
func jpfSort(arguments []interface{}) (interface{}, error) {
|
||||
if items, ok := toArrayNum(arguments[0]); ok {
|
||||
d := sort.Float64Slice(items)
|
||||
sort.Stable(d)
|
||||
final := make([]interface{}, len(d))
|
||||
for i, val := range d {
|
||||
final[i] = val
|
||||
}
|
||||
return final, nil
|
||||
}
|
||||
// Otherwise we're dealing with sort()'ing strings.
|
||||
items, _ := toArrayStr(arguments[0])
|
||||
d := sort.StringSlice(items)
|
||||
sort.Stable(d)
|
||||
final := make([]interface{}, len(d))
|
||||
for i, val := range d {
|
||||
final[i] = val
|
||||
}
|
||||
return final, nil
|
||||
}
|
||||
func jpfSortBy(arguments []interface{}) (interface{}, error) {
|
||||
intr := arguments[0].(*treeInterpreter)
|
||||
arr := arguments[1].([]interface{})
|
||||
exp := arguments[2].(expRef)
|
||||
node := exp.ref
|
||||
if len(arr) == 0 {
|
||||
return arr, nil
|
||||
} else if len(arr) == 1 {
|
||||
return arr, nil
|
||||
}
|
||||
start, err := intr.Execute(node, arr[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := start.(float64); ok {
|
||||
sortable := &byExprFloat{intr, node, arr, false}
|
||||
sort.Stable(sortable)
|
||||
if sortable.hasError {
|
||||
return nil, errors.New("error in sort_by comparison")
|
||||
}
|
||||
return arr, nil
|
||||
} else if _, ok := start.(string); ok {
|
||||
sortable := &byExprString{intr, node, arr, false}
|
||||
sort.Stable(sortable)
|
||||
if sortable.hasError {
|
||||
return nil, errors.New("error in sort_by comparison")
|
||||
}
|
||||
return arr, nil
|
||||
} else {
|
||||
return nil, errors.New("invalid type, must be number of string")
|
||||
}
|
||||
}
|
||||
func jpfJoin(arguments []interface{}) (interface{}, error) {
|
||||
sep := arguments[0].(string)
|
||||
// We can't just do arguments[1].([]string), we have to
|
||||
// manually convert each item to a string.
|
||||
arrayStr := []string{}
|
||||
for _, item := range arguments[1].([]interface{}) {
|
||||
arrayStr = append(arrayStr, item.(string))
|
||||
}
|
||||
return strings.Join(arrayStr, sep), nil
|
||||
}
|
||||
func jpfReverse(arguments []interface{}) (interface{}, error) {
|
||||
if s, ok := arguments[0].(string); ok {
|
||||
r := []rune(s)
|
||||
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
|
||||
r[i], r[j] = r[j], r[i]
|
||||
}
|
||||
return string(r), nil
|
||||
}
|
||||
items := arguments[0].([]interface{})
|
||||
length := len(items)
|
||||
reversed := make([]interface{}, length)
|
||||
for i, item := range items {
|
||||
reversed[length-(i+1)] = item
|
||||
}
|
||||
return reversed, nil
|
||||
}
|
||||
func jpfToArray(arguments []interface{}) (interface{}, error) {
|
||||
if _, ok := arguments[0].([]interface{}); ok {
|
||||
return arguments[0], nil
|
||||
}
|
||||
return arguments[:1:1], nil
|
||||
}
|
||||
func jpfToString(arguments []interface{}) (interface{}, error) {
|
||||
if v, ok := arguments[0].(string); ok {
|
||||
return v, nil
|
||||
}
|
||||
result, err := json.Marshal(arguments[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return string(result), nil
|
||||
}
|
||||
func jpfToNumber(arguments []interface{}) (interface{}, error) {
|
||||
arg := arguments[0]
|
||||
if v, ok := arg.(float64); ok {
|
||||
return v, nil
|
||||
}
|
||||
if v, ok := arg.(string); ok {
|
||||
conv, err := strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
return conv, nil
|
||||
}
|
||||
if _, ok := arg.([]interface{}); ok {
|
||||
return nil, nil
|
||||
}
|
||||
if _, ok := arg.(map[string]interface{}); ok {
|
||||
return nil, nil
|
||||
}
|
||||
if arg == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if arg == true || arg == false {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errors.New("unknown type")
|
||||
}
|
||||
func jpfNotNull(arguments []interface{}) (interface{}, error) {
|
||||
for _, arg := range arguments {
|
||||
if arg != nil {
|
||||
return arg, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
418
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go
generated
vendored
Normal file
418
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go
generated
vendored
Normal file
|
@ -0,0 +1,418 @@
|
|||
package jmespath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
/* This is a tree based interpreter. It walks the AST and directly
|
||||
interprets the AST to search through a JSON document.
|
||||
*/
|
||||
|
||||
type treeInterpreter struct {
|
||||
fCall *functionCaller
|
||||
}
|
||||
|
||||
func newInterpreter() *treeInterpreter {
|
||||
interpreter := treeInterpreter{}
|
||||
interpreter.fCall = newFunctionCaller()
|
||||
return &interpreter
|
||||
}
|
||||
|
||||
type expRef struct {
|
||||
ref ASTNode
|
||||
}
|
||||
|
||||
// Execute takes an ASTNode and input data and interprets the AST directly.
|
||||
// It will produce the result of applying the JMESPath expression associated
|
||||
// with the ASTNode to the input data "value".
|
||||
func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
|
||||
switch node.nodeType {
|
||||
case ASTComparator:
|
||||
left, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
right, err := intr.Execute(node.children[1], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch node.value {
|
||||
case tEQ:
|
||||
return objsEqual(left, right), nil
|
||||
case tNE:
|
||||
return !objsEqual(left, right), nil
|
||||
}
|
||||
leftNum, ok := left.(float64)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
rightNum, ok := right.(float64)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
switch node.value {
|
||||
case tGT:
|
||||
return leftNum > rightNum, nil
|
||||
case tGTE:
|
||||
return leftNum >= rightNum, nil
|
||||
case tLT:
|
||||
return leftNum < rightNum, nil
|
||||
case tLTE:
|
||||
return leftNum <= rightNum, nil
|
||||
}
|
||||
case ASTExpRef:
|
||||
return expRef{ref: node.children[0]}, nil
|
||||
case ASTFunctionExpression:
|
||||
resolvedArgs := []interface{}{}
|
||||
for _, arg := range node.children {
|
||||
current, err := intr.Execute(arg, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolvedArgs = append(resolvedArgs, current)
|
||||
}
|
||||
return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
|
||||
case ASTField:
|
||||
if m, ok := value.(map[string]interface{}); ok {
|
||||
key := node.value.(string)
|
||||
return m[key], nil
|
||||
}
|
||||
return intr.fieldFromStruct(node.value.(string), value)
|
||||
case ASTFilterProjection:
|
||||
left, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
sliceType, ok := left.([]interface{})
|
||||
if !ok {
|
||||
if isSliceType(left) {
|
||||
return intr.filterProjectionWithReflection(node, left)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
compareNode := node.children[2]
|
||||
collected := []interface{}{}
|
||||
for _, element := range sliceType {
|
||||
result, err := intr.Execute(compareNode, element)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !isFalse(result) {
|
||||
current, err := intr.Execute(node.children[1], element)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if current != nil {
|
||||
collected = append(collected, current)
|
||||
}
|
||||
}
|
||||
}
|
||||
return collected, nil
|
||||
case ASTFlatten:
|
||||
left, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
sliceType, ok := left.([]interface{})
|
||||
if !ok {
|
||||
// If we can't type convert to []interface{}, there's
|
||||
// a chance this could still work via reflection if we're
|
||||
// dealing with user provided types.
|
||||
if isSliceType(left) {
|
||||
return intr.flattenWithReflection(left)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
flattened := []interface{}{}
|
||||
for _, element := range sliceType {
|
||||
if elementSlice, ok := element.([]interface{}); ok {
|
||||
flattened = append(flattened, elementSlice...)
|
||||
} else if isSliceType(element) {
|
||||
reflectFlat := []interface{}{}
|
||||
v := reflect.ValueOf(element)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
reflectFlat = append(reflectFlat, v.Index(i).Interface())
|
||||
}
|
||||
flattened = append(flattened, reflectFlat...)
|
||||
} else {
|
||||
flattened = append(flattened, element)
|
||||
}
|
||||
}
|
||||
return flattened, nil
|
||||
case ASTIdentity, ASTCurrentNode:
|
||||
return value, nil
|
||||
case ASTIndex:
|
||||
if sliceType, ok := value.([]interface{}); ok {
|
||||
index := node.value.(int)
|
||||
if index < 0 {
|
||||
index += len(sliceType)
|
||||
}
|
||||
if index < len(sliceType) && index >= 0 {
|
||||
return sliceType[index], nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
// Otherwise try via reflection.
|
||||
rv := reflect.ValueOf(value)
|
||||
if rv.Kind() == reflect.Slice {
|
||||
index := node.value.(int)
|
||||
if index < 0 {
|
||||
index += rv.Len()
|
||||
}
|
||||
if index < rv.Len() && index >= 0 {
|
||||
v := rv.Index(index)
|
||||
return v.Interface(), nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
case ASTKeyValPair:
|
||||
return intr.Execute(node.children[0], value)
|
||||
case ASTLiteral:
|
||||
return node.value, nil
|
||||
case ASTMultiSelectHash:
|
||||
if value == nil {
|
||||
return nil, nil
|
||||
}
|
||||
collected := make(map[string]interface{})
|
||||
for _, child := range node.children {
|
||||
current, err := intr.Execute(child, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := child.value.(string)
|
||||
collected[key] = current
|
||||
}
|
||||
return collected, nil
|
||||
case ASTMultiSelectList:
|
||||
if value == nil {
|
||||
return nil, nil
|
||||
}
|
||||
collected := []interface{}{}
|
||||
for _, child := range node.children {
|
||||
current, err := intr.Execute(child, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collected = append(collected, current)
|
||||
}
|
||||
return collected, nil
|
||||
case ASTOrExpression:
|
||||
matched, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isFalse(matched) {
|
||||
matched, err = intr.Execute(node.children[1], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return matched, nil
|
||||
case ASTAndExpression:
|
||||
matched, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isFalse(matched) {
|
||||
return matched, nil
|
||||
}
|
||||
return intr.Execute(node.children[1], value)
|
||||
case ASTNotExpression:
|
||||
matched, err := intr.Execute(node.children[0], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isFalse(matched) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
case ASTPipe:
|
||||
		result := value
		var err error
		for _, child := range node.children {
			result, err = intr.Execute(child, result)
			if err != nil {
				return nil, err
			}
		}
		return result, nil
	case ASTProjection:
		left, err := intr.Execute(node.children[0], value)
		if err != nil {
			return nil, err
		}
		sliceType, ok := left.([]interface{})
		if !ok {
			if isSliceType(left) {
				return intr.projectWithReflection(node, left)
			}
			return nil, nil
		}
		collected := []interface{}{}
		var current interface{}
		for _, element := range sliceType {
			current, err = intr.Execute(node.children[1], element)
			if err != nil {
				return nil, err
			}
			if current != nil {
				collected = append(collected, current)
			}
		}
		return collected, nil
	case ASTSubexpression, ASTIndexExpression:
		left, err := intr.Execute(node.children[0], value)
		if err != nil {
			return nil, err
		}
		return intr.Execute(node.children[1], left)
	case ASTSlice:
		sliceType, ok := value.([]interface{})
		if !ok {
			if isSliceType(value) {
				return intr.sliceWithReflection(node, value)
			}
			return nil, nil
		}
		parts := node.value.([]*int)
		sliceParams := make([]sliceParam, 3)
		for i, part := range parts {
			if part != nil {
				sliceParams[i].Specified = true
				sliceParams[i].N = *part
			}
		}
		return slice(sliceType, sliceParams)
	case ASTValueProjection:
		left, err := intr.Execute(node.children[0], value)
		if err != nil {
			return nil, nil
		}
		mapType, ok := left.(map[string]interface{})
		if !ok {
			return nil, nil
		}
		values := make([]interface{}, len(mapType))
		for _, value := range mapType {
			values = append(values, value)
		}
		collected := []interface{}{}
		for _, element := range values {
			current, err := intr.Execute(node.children[1], element)
			if err != nil {
				return nil, err
			}
			if current != nil {
				collected = append(collected, current)
			}
		}
		return collected, nil
	}
	return nil, errors.New("Unknown AST node: " + node.nodeType.String())
}

func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
	rv := reflect.ValueOf(value)
	first, n := utf8.DecodeRuneInString(key)
	fieldName := string(unicode.ToUpper(first)) + key[n:]
	if rv.Kind() == reflect.Struct {
		v := rv.FieldByName(fieldName)
		if !v.IsValid() {
			return nil, nil
		}
		return v.Interface(), nil
	} else if rv.Kind() == reflect.Ptr {
		// Handle multiple levels of indirection?
		if rv.IsNil() {
			return nil, nil
		}
		rv = rv.Elem()
		v := rv.FieldByName(fieldName)
		if !v.IsValid() {
			return nil, nil
		}
		return v.Interface(), nil
	}
	return nil, nil
}

func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
	v := reflect.ValueOf(value)
	flattened := []interface{}{}
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		if reflect.TypeOf(element).Kind() == reflect.Slice {
			// Then insert the contents of the element
			// slice into the flattened slice,
			// i.e flattened = append(flattened, mySlice...)
			elementV := reflect.ValueOf(element)
			for j := 0; j < elementV.Len(); j++ {
				flattened = append(
					flattened, elementV.Index(j).Interface())
			}
		} else {
			flattened = append(flattened, element)
		}
	}
	return flattened, nil
}

func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
	v := reflect.ValueOf(value)
	parts := node.value.([]*int)
	sliceParams := make([]sliceParam, 3)
	for i, part := range parts {
		if part != nil {
			sliceParams[i].Specified = true
			sliceParams[i].N = *part
		}
	}
	final := []interface{}{}
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		final = append(final, element)
	}
	return slice(final, sliceParams)
}

func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
	compareNode := node.children[2]
	collected := []interface{}{}
	v := reflect.ValueOf(value)
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		result, err := intr.Execute(compareNode, element)
		if err != nil {
			return nil, err
		}
		if !isFalse(result) {
			current, err := intr.Execute(node.children[1], element)
			if err != nil {
				return nil, err
			}
			if current != nil {
				collected = append(collected, current)
			}
		}
	}
	return collected, nil
}

func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
	collected := []interface{}{}
	v := reflect.ValueOf(value)
	for i := 0; i < v.Len(); i++ {
		element := v.Index(i).Interface()
		result, err := intr.Execute(node.children[1], element)
		if err != nil {
			return nil, err
		}
		if result != nil {
			collected = append(collected, result)
		}
	}
	return collected, nil
}
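
For context on how the interpreter cases above get exercised end to end, here is a minimal usage sketch. It assumes the package's exported Search helper (part of the go-jmespath public API, defined in a file not shown in this diff), which tokenizes and parses the expression and then walks it with the tree interpreter:

package main

import (
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{} = map[string]interface{}{
		"instances": []interface{}{
			map[string]interface{}{"name": "a", "state": "running"},
			map[string]interface{}{"name": "b", "state": "stopped"},
		},
	}
	// "instances[*].name" exercises the ASTProjection and ASTField cases above.
	result, err := jmespath.Search("instances[*].name", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // [a b]
}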
420
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go
generated
vendored
Normal file
@ -0,0 +1,420 @@
package jmespath

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"unicode/utf8"
)

type token struct {
	tokenType tokType
	value     string
	position  int
	length    int
}

type tokType int

const eof = -1

// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The width of the current rune.
	buf        bytes.Buffer // Internal buffer used for building up values.
}

// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}

func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific
	// location where the error occurred.
	return "SyntaxError: " + e.msg
}

// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}

//go:generate stringer -type=tokType
const (
	tUnknown tokType = iota
	tStar
	tDot
	tFilter
	tFlatten
	tLparen
	tRparen
	tLbracket
	tRbracket
	tLbrace
	tRbrace
	tOr
	tPipe
	tNumber
	tUnquotedIdentifier
	tQuotedIdentifier
	tComma
	tColon
	tLT
	tLTE
	tGT
	tGTE
	tEQ
	tNE
	tJSONLiteral
	tStringLiteral
	tCurrent
	tExpref
	tAnd
	tNot
	tEOF
)

var basicTokens = map[rune]tokType{
	'.': tDot,
	'*': tStar,
	',': tComma,
	':': tColon,
	'{': tLbrace,
	'}': tRbrace,
	']': tRbracket, // tLbracket not included because it could be "[]"
	'(': tLparen,
	')': tRparen,
	'@': tCurrent,
}

// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270

// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}

var whiteSpace = map[rune]bool{
	' ': true, '\t': true, '\n': true, '\r': true,
}

func (t token) String() string {
	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
		t.tokenType, t.value, t.position, t.length)
}

// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
	lexer := Lexer{}
	return &lexer
}

func (lexer *Lexer) next() rune {
	if lexer.currentPos >= len(lexer.expression) {
		lexer.lastWidth = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
	lexer.lastWidth = w
	lexer.currentPos += w
	return r
}

func (lexer *Lexer) back() {
	lexer.currentPos -= lexer.lastWidth
}

func (lexer *Lexer) peek() rune {
	t := lexer.next()
	lexer.back()
	return t
}

// tokenize takes an expression and returns corresponding tokens.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
	var tokens []token
	lexer.expression = expression
	lexer.currentPos = 0
	lexer.lastWidth = 0
loop:
	for {
		r := lexer.next()
		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
			t := lexer.consumeUnquotedIdentifier()
			tokens = append(tokens, t)
		} else if val, ok := basicTokens[r]; ok {
			// Basic single char token.
			t := token{
				tokenType: val,
				value:     string(r),
				position:  lexer.currentPos - lexer.lastWidth,
				length:    1,
			}
			tokens = append(tokens, t)
		} else if r == '-' || (r >= '0' && r <= '9') {
			t := lexer.consumeNumber()
			tokens = append(tokens, t)
		} else if r == '[' {
			t := lexer.consumeLBracket()
			tokens = append(tokens, t)
		} else if r == '"' {
			t, err := lexer.consumeQuotedIdentifier()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '\'' {
			t, err := lexer.consumeRawStringLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '`' {
			t, err := lexer.consumeLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '|' {
			t := lexer.matchOrElse(r, '|', tOr, tPipe)
			tokens = append(tokens, t)
		} else if r == '<' {
			t := lexer.matchOrElse(r, '=', tLTE, tLT)
			tokens = append(tokens, t)
		} else if r == '>' {
			t := lexer.matchOrElse(r, '=', tGTE, tGT)
			tokens = append(tokens, t)
		} else if r == '!' {
			t := lexer.matchOrElse(r, '=', tNE, tNot)
			tokens = append(tokens, t)
		} else if r == '=' {
			t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
			tokens = append(tokens, t)
		} else if r == '&' {
			t := lexer.matchOrElse(r, '&', tAnd, tExpref)
			tokens = append(tokens, t)
		} else if r == eof {
			break loop
		} else if _, ok := whiteSpace[r]; ok {
			// Ignore whitespace
		} else {
			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
		}
	}
	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
	return tokens, nil
}

// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
	start := lexer.currentPos
	current := lexer.next()
	for current != end && current != eof {
		if current == '\\' && lexer.peek() != eof {
			lexer.next()
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return "", SyntaxError{
			msg:        "Unclosed delimiter: " + string(end),
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}

func (lexer *Lexer) consumeLiteral() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('`')
	if err != nil {
		return token{}, err
	}
	value = strings.Replace(value, "\\`", "`", -1)
	return token{
		tokenType: tJSONLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}

func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
	start := lexer.currentPos
	currentIndex := start
	current := lexer.next()
	for current != '\'' && lexer.peek() != eof {
		if current == '\\' && lexer.peek() == '\'' {
			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
			lexer.buf.WriteString(chunk)
			lexer.buf.WriteString("'")
			lexer.next()
			currentIndex = lexer.currentPos
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing
		// delimiter.
		return token{}, SyntaxError{
			msg:        "Unclosed delimiter: '",
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	if currentIndex < lexer.currentPos {
		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
	}
	value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
	lexer.buf.Reset()
	return token{
		tokenType: tStringLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}

func (lexer *Lexer) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: lexer.expression,
		Offset:     lexer.currentPos - 1,
	}
}

// Checks for a two char token, otherwise matches a single character
// token. This is used whenever a two char token overlaps a single
// char token, e.g. "||" -> tPipe, "|" -> tOr.
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
	start := lexer.currentPos - lexer.lastWidth
	nextRune := lexer.next()
	var t token
	if nextRune == second {
		t = token{
			tokenType: matchedType,
			value:     string(first) + string(second),
			position:  start,
			length:    2,
		}
	} else {
		lexer.back()
		t = token{
			tokenType: singleCharType,
			value:     string(first),
			position:  start,
			length:    1,
		}
	}
	return t
}

func (lexer *Lexer) consumeLBracket() token {
	// There are three options here:
	// 1. A filter expression "[?"
	// 2. A flatten operator "[]"
	// 3. A bare lbracket "["
	start := lexer.currentPos - lexer.lastWidth
	nextRune := lexer.next()
	var t token
	if nextRune == '?' {
		t = token{
			tokenType: tFilter,
			value:     "[?",
			position:  start,
			length:    2,
		}
	} else if nextRune == ']' {
		t = token{
			tokenType: tFlatten,
			value:     "[]",
			position:  start,
			length:    2,
		}
	} else {
		t = token{
			tokenType: tLbracket,
			value:     "[",
			position:  start,
			length:    1,
		}
		lexer.back()
	}
	return t
}

func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('"')
	if err != nil {
		return token{}, err
	}
	var decoded string
	asJSON := []byte("\"" + value + "\"")
	if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
		return token{}, err
	}
	return token{
		tokenType: tQuotedIdentifier,
		value:     decoded,
		position:  start - 1,
		length:    len(decoded),
	}, nil
}

func (lexer *Lexer) consumeUnquotedIdentifier() token {
	// Consume runes until we reach the end of an unquoted
	// identifier.
	start := lexer.currentPos - lexer.lastWidth
	for {
		r := lexer.next()
		if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
			lexer.back()
			break
		}
	}
	value := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tUnquotedIdentifier,
		value:     value,
		position:  start,
		length:    lexer.currentPos - start,
	}
}

func (lexer *Lexer) consumeNumber() token {
	// Consume runes until we reach something that's not a number.
	start := lexer.currentPos - lexer.lastWidth
	for {
		r := lexer.next()
		if r < '0' || r > '9' {
			lexer.back()
			break
		}
	}
	value := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tNumber,
		value:     value,
		position:  start,
		length:    lexer.currentPos - start,
	}
}
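
As a rough illustration of how SyntaxError and HighlightLocation surface lexing and parsing failures, here is a fragment continuing the sketch above (same imports; it also assumes the exported Parser from parser.go below):

	parser := jmespath.NewParser()
	if _, err := parser.Parse("foo."); err != nil {
		if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
			fmt.Println(syntaxErr.Error())
			// SyntaxError: Expected identifier, lbracket, or lbrace
			fmt.Println(syntaxErr.HighlightLocation())
			// foo.
			//     ^
		}
	}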
603
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go
generated
vendored
Normal file
@ -0,0 +1,603 @@
package jmespath

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

type astNodeType int

//go:generate stringer -type astNodeType
const (
	ASTEmpty astNodeType = iota
	ASTComparator
	ASTCurrentNode
	ASTExpRef
	ASTFunctionExpression
	ASTField
	ASTFilterProjection
	ASTFlatten
	ASTIdentity
	ASTIndex
	ASTIndexExpression
	ASTKeyValPair
	ASTLiteral
	ASTMultiSelectHash
	ASTMultiSelectList
	ASTOrExpression
	ASTAndExpression
	ASTNotExpression
	ASTPipe
	ASTProjection
	ASTSubexpression
	ASTSlice
	ASTValueProjection
)

// ASTNode represents the abstract syntax tree of a JMESPath expression.
type ASTNode struct {
	nodeType astNodeType
	value    interface{}
	children []ASTNode
}

func (node ASTNode) String() string {
	return node.PrettyPrint(0)
}

// PrettyPrint will pretty print the parsed AST.
// The AST is an implementation detail and this pretty print
// function is provided as a convenience method to help with
// debugging. You should not rely on its output as the internal
// structure of the AST may change at any time.
func (node ASTNode) PrettyPrint(indent int) string {
	spaces := strings.Repeat(" ", indent)
	output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
	nextIndent := indent + 2
	if node.value != nil {
		if converted, ok := node.value.(fmt.Stringer); ok {
			// Account for things like comparator nodes
			// that are enums with a String() method.
			output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
		} else {
			output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
		}
	}
	lastIndex := len(node.children)
	if lastIndex > 0 {
		output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
		childIndent := nextIndent + 2
		for _, elem := range node.children {
			output += elem.PrettyPrint(childIndent)
		}
	}
	output += fmt.Sprintf("%s}\n", spaces)
	return output
}

var bindingPowers = map[tokType]int{
	tEOF:                0,
	tUnquotedIdentifier: 0,
	tQuotedIdentifier:   0,
	tRbracket:           0,
	tRparen:             0,
	tComma:              0,
	tRbrace:             0,
	tNumber:             0,
	tCurrent:            0,
	tExpref:             0,
	tColon:              0,
	tPipe:               1,
	tOr:                 2,
	tAnd:                3,
	tEQ:                 5,
	tLT:                 5,
	tLTE:                5,
	tGT:                 5,
	tGTE:                5,
	tNE:                 5,
	tFlatten:            9,
	tStar:               20,
	tFilter:             21,
	tDot:                40,
	tNot:                45,
	tLbrace:             50,
	tLbracket:           55,
	tLparen:             60,
}

// Parser holds state about the current expression being parsed.
type Parser struct {
	expression string
	tokens     []token
	index      int
}

// NewParser creates a new JMESPath parser.
func NewParser() *Parser {
	p := Parser{}
	return &p
}

// Parse will compile a JMESPath expression.
func (p *Parser) Parse(expression string) (ASTNode, error) {
	lexer := NewLexer()
	p.expression = expression
	p.index = 0
	tokens, err := lexer.tokenize(expression)
	if err != nil {
		return ASTNode{}, err
	}
	p.tokens = tokens
	parsed, err := p.parseExpression(0)
	if err != nil {
		return ASTNode{}, err
	}
	if p.current() != tEOF {
		return ASTNode{}, p.syntaxError(fmt.Sprintf(
			"Unexpected token at the end of the expression: %s", p.current()))
	}
	return parsed, nil
}

func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
	var err error
	leftToken := p.lookaheadToken(0)
	p.advance()
	leftNode, err := p.nud(leftToken)
	if err != nil {
		return ASTNode{}, err
	}
	currentToken := p.current()
	for bindingPower < bindingPowers[currentToken] {
		p.advance()
		leftNode, err = p.led(currentToken, leftNode)
		if err != nil {
			return ASTNode{}, err
		}
		currentToken = p.current()
	}
	return leftNode, nil
}

func (p *Parser) parseIndexExpression() (ASTNode, error) {
	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
		return p.parseSliceExpression()
	}
	indexStr := p.lookaheadToken(0).value
	parsedInt, err := strconv.Atoi(indexStr)
	if err != nil {
		return ASTNode{}, err
	}
	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
	p.advance()
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return indexNode, nil
}

func (p *Parser) parseSliceExpression() (ASTNode, error) {
	parts := []*int{nil, nil, nil}
	index := 0
	current := p.current()
	for current != tRbracket && index < 3 {
		if current == tColon {
			index++
			p.advance()
		} else if current == tNumber {
			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
			if err != nil {
				return ASTNode{}, err
			}
			parts[index] = &parsedInt
			p.advance()
		} else {
			return ASTNode{}, p.syntaxError(
				"Expected tColon or tNumber" + ", received: " + p.current().String())
		}
		current = p.current()
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTSlice,
		value:    parts,
	}, nil
}

func (p *Parser) match(tokenType tokType) error {
	if p.current() == tokenType {
		p.advance()
		return nil
	}
	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
}

func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
	switch tokenType {
	case tDot:
		if p.current() != tStar {
			right, err := p.parseDotRHS(bindingPowers[tDot])
			return ASTNode{
				nodeType: ASTSubexpression,
				children: []ASTNode{node, right},
			}, err
		}
		p.advance()
		right, err := p.parseProjectionRHS(bindingPowers[tDot])
		return ASTNode{
			nodeType: ASTValueProjection,
			children: []ASTNode{node, right},
		}, err
	case tPipe:
		right, err := p.parseExpression(bindingPowers[tPipe])
		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
	case tOr:
		right, err := p.parseExpression(bindingPowers[tOr])
		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
	case tAnd:
		right, err := p.parseExpression(bindingPowers[tAnd])
		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
	case tLparen:
		name := node.value
		var args []ASTNode
		for p.current() != tRparen {
			expression, err := p.parseExpression(0)
			if err != nil {
				return ASTNode{}, err
			}
			if p.current() == tComma {
				if err := p.match(tComma); err != nil {
					return ASTNode{}, err
				}
			}
			args = append(args, expression)
		}
		if err := p.match(tRparen); err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTFunctionExpression,
			value:    name,
			children: args,
		}, nil
	case tFilter:
		return p.parseFilter(node)
	case tFlatten:
		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{left, right},
		}, err
	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
		right, err := p.parseExpression(bindingPowers[tokenType])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTComparator,
			value:    tokenType,
			children: []ASTNode{node, right},
		}, nil
	case tLbracket:
		tokenType := p.current()
		var right ASTNode
		var err error
		if tokenType == tNumber || tokenType == tColon {
			right, err = p.parseIndexExpression()
			if err != nil {
				return ASTNode{}, err
			}
			return p.projectIfSlice(node, right)
		}
		// Otherwise this is a projection.
		if err := p.match(tStar); err != nil {
			return ASTNode{}, err
		}
		if err := p.match(tRbracket); err != nil {
			return ASTNode{}, err
		}
		right, err = p.parseProjectionRHS(bindingPowers[tStar])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{node, right},
		}, nil
	}
	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
}

func (p *Parser) nud(token token) (ASTNode, error) {
	switch token.tokenType {
	case tJSONLiteral:
		var parsed interface{}
		err := json.Unmarshal([]byte(token.value), &parsed)
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
	case tStringLiteral:
		return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
	case tUnquotedIdentifier:
		return ASTNode{
			nodeType: ASTField,
			value:    token.value,
		}, nil
	case tQuotedIdentifier:
		node := ASTNode{nodeType: ASTField, value: token.value}
		if p.current() == tLparen {
			return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
		}
		return node, nil
	case tStar:
		left := ASTNode{nodeType: ASTIdentity}
		var right ASTNode
		var err error
		if p.current() == tRbracket {
			right = ASTNode{nodeType: ASTIdentity}
		} else {
			right, err = p.parseProjectionRHS(bindingPowers[tStar])
		}
		return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
	case tFilter:
		return p.parseFilter(ASTNode{nodeType: ASTIdentity})
	case tLbrace:
		return p.parseMultiSelectHash()
	case tFlatten:
		left := ASTNode{
			nodeType: ASTFlatten,
			children: []ASTNode{{nodeType: ASTIdentity}},
		}
		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
	case tLbracket:
		tokenType := p.current()
		//var right ASTNode
		if tokenType == tNumber || tokenType == tColon {
			right, err := p.parseIndexExpression()
			if err != nil {
				return ASTNode{}, nil
			}
			return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
		} else if tokenType == tStar && p.lookahead(1) == tRbracket {
			p.advance()
			p.advance()
			right, err := p.parseProjectionRHS(bindingPowers[tStar])
			if err != nil {
				return ASTNode{}, err
			}
			return ASTNode{
				nodeType: ASTProjection,
				children: []ASTNode{{nodeType: ASTIdentity}, right},
			}, nil
		} else {
			return p.parseMultiSelectList()
		}
	case tCurrent:
		return ASTNode{nodeType: ASTCurrentNode}, nil
	case tExpref:
		expression, err := p.parseExpression(bindingPowers[tExpref])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
	case tNot:
		expression, err := p.parseExpression(bindingPowers[tNot])
		if err != nil {
			return ASTNode{}, err
		}
		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
	case tLparen:
		expression, err := p.parseExpression(0)
		if err != nil {
			return ASTNode{}, err
		}
		if err := p.match(tRparen); err != nil {
			return ASTNode{}, err
		}
		return expression, nil
	case tEOF:
		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
	}

	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
}

func (p *Parser) parseMultiSelectList() (ASTNode, error) {
	var expressions []ASTNode
	for {
		expression, err := p.parseExpression(0)
		if err != nil {
			return ASTNode{}, err
		}
		expressions = append(expressions, expression)
		if p.current() == tRbracket {
			break
		}
		err = p.match(tComma)
		if err != nil {
			return ASTNode{}, err
		}
	}
	err := p.match(tRbracket)
	if err != nil {
		return ASTNode{}, err
	}
	return ASTNode{
		nodeType: ASTMultiSelectList,
		children: expressions,
	}, nil
}

func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
	var children []ASTNode
	for {
		keyToken := p.lookaheadToken(0)
		if err := p.match(tUnquotedIdentifier); err != nil {
			if err := p.match(tQuotedIdentifier); err != nil {
				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
			}
		}
		keyName := keyToken.value
		err := p.match(tColon)
		if err != nil {
			return ASTNode{}, err
		}
		value, err := p.parseExpression(0)
		if err != nil {
			return ASTNode{}, err
		}
		node := ASTNode{
			nodeType: ASTKeyValPair,
			value:    keyName,
			children: []ASTNode{value},
		}
		children = append(children, node)
		if p.current() == tComma {
			err := p.match(tComma)
			if err != nil {
				return ASTNode{}, nil
			}
		} else if p.current() == tRbrace {
			err := p.match(tRbrace)
			if err != nil {
				return ASTNode{}, nil
			}
			break
		}
	}
	return ASTNode{
		nodeType: ASTMultiSelectHash,
		children: children,
	}, nil
}

func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
	indexExpr := ASTNode{
		nodeType: ASTIndexExpression,
		children: []ASTNode{left, right},
	}
	if right.nodeType == ASTSlice {
		right, err := p.parseProjectionRHS(bindingPowers[tStar])
		return ASTNode{
			nodeType: ASTProjection,
			children: []ASTNode{indexExpr, right},
		}, err
	}
	return indexExpr, nil
}
func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
	var right, condition ASTNode
	var err error
	condition, err = p.parseExpression(0)
	if err != nil {
		return ASTNode{}, err
	}
	if err := p.match(tRbracket); err != nil {
		return ASTNode{}, err
	}
	if p.current() == tFlatten {
		right = ASTNode{nodeType: ASTIdentity}
	} else {
		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
		if err != nil {
			return ASTNode{}, err
		}
	}

	return ASTNode{
		nodeType: ASTFilterProjection,
		children: []ASTNode{node, right, condition},
	}, nil
}

func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
	lookahead := p.current()
	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
		return p.parseExpression(bindingPower)
	} else if lookahead == tLbracket {
		if err := p.match(tLbracket); err != nil {
			return ASTNode{}, err
		}
		return p.parseMultiSelectList()
	} else if lookahead == tLbrace {
		if err := p.match(tLbrace); err != nil {
			return ASTNode{}, err
		}
		return p.parseMultiSelectHash()
	}
	return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
}

func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
	current := p.current()
	if bindingPowers[current] < 10 {
		return ASTNode{nodeType: ASTIdentity}, nil
	} else if current == tLbracket {
		return p.parseExpression(bindingPower)
	} else if current == tFilter {
		return p.parseExpression(bindingPower)
	} else if current == tDot {
		err := p.match(tDot)
		if err != nil {
			return ASTNode{}, err
		}
		return p.parseDotRHS(bindingPower)
	} else {
		return ASTNode{}, p.syntaxError("Error")
	}
}

func (p *Parser) lookahead(number int) tokType {
	return p.lookaheadToken(number).tokenType
}

func (p *Parser) current() tokType {
	return p.lookahead(0)
}

func (p *Parser) lookaheadToken(number int) token {
	return p.tokens[p.index+number]
}

func (p *Parser) advance() {
	p.index++
}

func tokensOneOf(elements []tokType, token tokType) bool {
	for _, elem := range elements {
		if elem == token {
			return true
		}
	}
	return false
}

func (p *Parser) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     p.lookaheadToken(0).position,
	}
}

// Create a SyntaxError based on the provided token.
// This differs from syntaxError() which creates a SyntaxError
// based on the current lookahead token.
func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: p.expression,
		Offset:     t.position,
	}
}
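
A small fragment showing the parser in isolation, continuing the earlier sketch (same imports). PrettyPrint output is an implementation detail, as the doc comment above notes, so it is only useful for debugging:

	parser := jmespath.NewParser()
	ast, err := parser.Parse("foo.bar[0]")
	if err != nil {
		panic(err)
	}
	// Dump the AST; the exact node layout may change between versions.
	fmt.Print(ast.PrettyPrint(0))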
16
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// generated by stringer -type=tokType; DO NOT EDIT

package jmespath

import "fmt"

const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"

var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}

func (i tokType) String() string {
	if i < 0 || i >= tokType(len(_tokType_index)-1) {
		return fmt.Sprintf("tokType(%d)", i)
	}
	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
}
185
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
package jmespath

import (
	"errors"
	"reflect"
)

// IsFalse determines if an object is false based on the JMESPath spec.
// JMESPath defines false values to be any of:
// - An empty string, array, or hash.
// - The boolean value false.
// - nil
func isFalse(value interface{}) bool {
	switch v := value.(type) {
	case bool:
		return !v
	case []interface{}:
		return len(v) == 0
	case map[string]interface{}:
		return len(v) == 0
	case string:
		return len(v) == 0
	case nil:
		return true
	}
	// Try the reflection cases before returning false.
	rv := reflect.ValueOf(value)
	switch rv.Kind() {
	case reflect.Struct:
		// A struct type will never be false, even if
		// all of its values are the zero type.
		return false
	case reflect.Slice, reflect.Map:
		return rv.Len() == 0
	case reflect.Ptr:
		if rv.IsNil() {
			return true
		}
		// If it's a pointer type, we'll try to deref the pointer
		// and evaluate the pointer value for isFalse.
		element := rv.Elem()
		return isFalse(element.Interface())
	}
	return false
}

// ObjsEqual is a generic object equality check.
// It will take two arbitrary objects and recursively determine
// if they are equal.
func objsEqual(left interface{}, right interface{}) bool {
	return reflect.DeepEqual(left, right)
}

// SliceParam refers to a single part of a slice.
// A slice consists of a start, a stop, and a step, similar to
// python slices.
type sliceParam struct {
	N         int
	Specified bool
}

// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
	computed, err := computeSliceParams(len(slice), parts)
	if err != nil {
		return nil, err
	}
	start, stop, step := computed[0], computed[1], computed[2]
	result := []interface{}{}
	if step > 0 {
		for i := start; i < stop; i += step {
			result = append(result, slice[i])
		}
	} else {
		for i := start; i > stop; i += step {
			result = append(result, slice[i])
		}
	}
	return result, nil
}

func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
	var start, stop, step int
	if !parts[2].Specified {
		step = 1
	} else if parts[2].N == 0 {
		return nil, errors.New("Invalid slice, step cannot be 0")
	} else {
		step = parts[2].N
	}
	var stepValueNegative bool
	if step < 0 {
		stepValueNegative = true
	} else {
		stepValueNegative = false
	}

	if !parts[0].Specified {
		if stepValueNegative {
			start = length - 1
		} else {
			start = 0
		}
	} else {
		start = capSlice(length, parts[0].N, step)
	}

	if !parts[1].Specified {
		if stepValueNegative {
			stop = -1
		} else {
			stop = length
		}
	} else {
		stop = capSlice(length, parts[1].N, step)
	}
	return []int{start, stop, step}, nil
}

func capSlice(length int, actual int, step int) int {
	if actual < 0 {
		actual += length
		if actual < 0 {
			if step < 0 {
				actual = -1
			} else {
				actual = 0
			}
		}
	} else if actual >= length {
		if step < 0 {
			actual = length - 1
		} else {
			actual = length
		}
	}
	return actual
}

// ToArrayNum converts an empty interface type to a slice of float64.
// If any element in the array cannot be converted, then nil is returned
// along with a second value of false.
func toArrayNum(data interface{}) ([]float64, bool) {
	// Is there a better way to do this with reflect?
	if d, ok := data.([]interface{}); ok {
		result := make([]float64, len(d))
		for i, el := range d {
			item, ok := el.(float64)
			if !ok {
				return nil, false
			}
			result[i] = item
		}
		return result, true
	}
	return nil, false
}

// ToArrayStr converts an empty interface type to a slice of strings.
// If any element in the array cannot be converted, then nil is returned
// along with a second value of false. If the input data could be entirely
// converted, then the converted data, along with a second value of true,
// will be returned.
func toArrayStr(data interface{}) ([]string, bool) {
	// Is there a better way to do this with reflect?
	if d, ok := data.([]interface{}); ok {
		result := make([]string, len(d))
		for i, el := range d {
			item, ok := el.(string)
			if !ok {
				return nil, false
			}
			result[i] = item
		}
		return result, true
	}
	return nil, false
}

func isSliceType(v interface{}) bool {
	if v == nil {
		return false
	}
	return reflect.TypeOf(v).Kind() == reflect.Slice
}
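
The slice helpers above mirror Python-style [start:stop:step] semantics, including negative steps. A minimal sketch of the observable behavior, again going through the package's exported Search entry point (assumed from the library's api.go, which is not part of this diff) and continuing the earlier example:

	data := []interface{}{0.0, 1.0, 2.0, 3.0, 4.0}

	every2nd, _ := jmespath.Search("[::2]", data)  // step of 2: [0 2 4]
	reversed, _ := jmespath.Search("[::-1]", data) // negative step walks backwards: [4 3 2 1 0]
	fmt.Println(every2nd, reversed)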